xref: /openbmc/qemu/linux-user/syscall.c (revision 3ad0a769)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "strace.h"
131 #include "signal-common.h"
132 #include "loader.h"
133 #include "qemu/guest-random.h"
134 #include "qemu/selfmap.h"
135 #include "user/syscall-trace.h"
136 #include "qapi/error.h"
137 #include "fd-trans.h"
138 #include "tcg/tcg.h"
139 
140 #ifndef CLONE_IO
141 #define CLONE_IO                0x80000000      /* Clone io context */
142 #endif
143 
144 /* We can't directly call the host clone syscall, because this will
145  * badly confuse libc (breaking mutexes, for example). So we must
146  * divide clone flags into:
147  *  * flag combinations that look like pthread_create()
148  *  * flag combinations that look like fork()
149  *  * flags we can implement within QEMU itself
150  *  * flags we can't support and will return an error for
151  */
152 /* For thread creation, all these flags must be present; for
153  * fork, none must be present.
154  */
155 #define CLONE_THREAD_FLAGS                              \
156     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
157      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
158 
159 /* These flags are ignored:
160  * CLONE_DETACHED is now ignored by the kernel;
161  * CLONE_IO is just an optimisation hint to the I/O scheduler
162  */
163 #define CLONE_IGNORED_FLAGS                     \
164     (CLONE_DETACHED | CLONE_IO)
165 
166 /* Flags for fork which we can implement within QEMU itself */
167 #define CLONE_OPTIONAL_FORK_FLAGS               \
168     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
169      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
170 
171 /* Flags for thread creation which we can implement within QEMU itself */
172 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
173     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
174      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
175 
176 #define CLONE_INVALID_FORK_FLAGS                                        \
177     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
178 
179 #define CLONE_INVALID_THREAD_FLAGS                                      \
180     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
181        CLONE_IGNORED_FLAGS))
182 
183 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
184  * have almost all been allocated. We cannot support any of
185  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
186  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
187  * The checks against the invalid thread masks above will catch these.
188  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
189  */
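
/* As a concrete illustration of the classification above (not an
 * exhaustive list): a clone() whose flags contain all of
 * CLONE_THREAD_FLAGS plus e.g. CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID -- roughly what current glibc pthread_create()
 * typically passes -- is handled as thread creation, while a plain
 * fork()-style clone() carrying only an exit signal in CSIGNAL (and
 * perhaps CLONE_CHILD_SETTID) takes the fork path.  Anything with bits
 * set in the relevant CLONE_INVALID_*_FLAGS mask is rejected with EINVAL.
 */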
190 
191 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
192  * once. This exercises the codepaths for restart.
193  */
194 //#define DEBUG_ERESTARTSYS
195 
196 //#include <linux/msdos_fs.h>
197 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
198 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
199 
200 #undef _syscall0
201 #undef _syscall1
202 #undef _syscall2
203 #undef _syscall3
204 #undef _syscall4
205 #undef _syscall5
206 #undef _syscall6
207 
208 #define _syscall0(type,name)		\
209 static type name (void)			\
210 {					\
211 	return syscall(__NR_##name);	\
212 }
213 
214 #define _syscall1(type,name,type1,arg1)		\
215 static type name (type1 arg1)			\
216 {						\
217 	return syscall(__NR_##name, arg1);	\
218 }
219 
220 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
221 static type name (type1 arg1,type2 arg2)		\
222 {							\
223 	return syscall(__NR_##name, arg1, arg2);	\
224 }
225 
226 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
227 static type name (type1 arg1,type2 arg2,type3 arg3)		\
228 {								\
229 	return syscall(__NR_##name, arg1, arg2, arg3);		\
230 }
231 
232 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
234 {										\
235 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
236 }
237 
238 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
239 		  type5,arg5)							\
240 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
241 {										\
242 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
243 }
244 
245 
246 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
247 		  type5,arg5,type6,arg6)					\
248 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
249                   type6 arg6)							\
250 {										\
251 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
252 }
253 
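/*
 * For reference, a single-argument instance such as
 *     _syscall1(int, set_tid_address, int *, tidptr)
 * expands to
 *     static int set_tid_address(int *tidptr)
 *     { return syscall(__NR_set_tid_address, tidptr); }
 * i.e. a thin static wrapper around the raw host syscall.
 */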
254 
255 #define __NR_sys_uname __NR_uname
256 #define __NR_sys_getcwd1 __NR_getcwd
257 #define __NR_sys_getdents __NR_getdents
258 #define __NR_sys_getdents64 __NR_getdents64
259 #define __NR_sys_getpriority __NR_getpriority
260 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
261 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
262 #define __NR_sys_syslog __NR_syslog
263 #if defined(__NR_futex)
264 # define __NR_sys_futex __NR_futex
265 #endif
266 #if defined(__NR_futex_time64)
267 # define __NR_sys_futex_time64 __NR_futex_time64
268 #endif
269 #define __NR_sys_inotify_init __NR_inotify_init
270 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
271 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
272 #define __NR_sys_statx __NR_statx
273 
274 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
275 #define __NR__llseek __NR_lseek
276 #endif
277 
278 /* Newer kernel ports have llseek() instead of _llseek() */
279 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
280 #define TARGET_NR__llseek TARGET_NR_llseek
281 #endif
282 
283 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
284 #ifndef TARGET_O_NONBLOCK_MASK
285 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
286 #endif
287 
288 #define __NR_sys_gettid __NR_gettid
289 _syscall0(int, sys_gettid)
290 
291 /* For the case of a 64-bit guest on a 32-bit host we must emulate
292  * getdents using getdents64, because otherwise the host
293  * might hand us back more dirent records than we can fit
294  * into the guest buffer after structure format conversion.
295  * Otherwise we emulate the guest getdents with the host getdents if available.
296  */
297 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
298 #define EMULATE_GETDENTS_WITH_GETDENTS
299 #endif
300 
301 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
302 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
303 #endif
304 #if (defined(TARGET_NR_getdents) && \
305       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
306     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
307 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
308 #endif
309 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
310 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
311           loff_t *, res, uint, wh);
312 #endif
313 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
314 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
315           siginfo_t *, uinfo)
316 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
317 #ifdef __NR_exit_group
318 _syscall1(int,exit_group,int,error_code)
319 #endif
320 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
321 _syscall1(int,set_tid_address,int *,tidptr)
322 #endif
323 #if defined(__NR_futex)
324 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
325           const struct timespec *,timeout,int *,uaddr2,int,val3)
326 #endif
327 #if defined(__NR_futex_time64)
328 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
329           const struct timespec *,timeout,int *,uaddr2,int,val3)
330 #endif
331 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
332 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
335 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_getcpu __NR_getcpu
338 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
339 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
340           void *, arg);
341 _syscall2(int, capget, struct __user_cap_header_struct *, header,
342           struct __user_cap_data_struct *, data);
343 _syscall2(int, capset, struct __user_cap_header_struct *, header,
344           struct __user_cap_data_struct *, data);
345 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
346 _syscall2(int, ioprio_get, int, which, int, who)
347 #endif
348 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
349 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
350 #endif
351 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
352 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
353 #endif
354 
355 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
356 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
357           unsigned long, idx1, unsigned long, idx2)
358 #endif
359 
360 /*
361  * It is assumed that struct statx is architecture independent.
362  */
363 #if defined(TARGET_NR_statx) && defined(__NR_statx)
364 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
365           unsigned int, mask, struct target_statx *, statxbuf)
366 #endif
367 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
368 _syscall2(int, membarrier, int, cmd, int, flags)
369 #endif
370 
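/*
 * Translation table for open()/fcntl() flag bits.  It is terminated by
 * the all-zero entry and is consumed by the generic bitmask helpers
 * (target_to_host_bitmask() / host_to_target_bitmask()) to convert flag
 * values between the guest and host encodings, e.g. for TARGET_NR_openat
 * or the F_GETFL/F_SETFL fcntl commands.
 */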
371 static const bitmask_transtbl fcntl_flags_tbl[] = {
372   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
373   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
374   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
375   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
376   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
377   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
378   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
379   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
380   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
381   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
382   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
383   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
384   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
385 #if defined(O_DIRECT)
386   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
387 #endif
388 #if defined(O_NOATIME)
389   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
390 #endif
391 #if defined(O_CLOEXEC)
392   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
393 #endif
394 #if defined(O_PATH)
395   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
396 #endif
397 #if defined(O_TMPFILE)
398   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
399 #endif
400   /* Don't terminate the list prematurely on 64-bit host+guest.  */
401 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
402   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
403 #endif
404   { 0, 0, 0, 0 }
405 };
406 
407 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
408 
409 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
410 #if defined(__NR_utimensat)
411 #define __NR_sys_utimensat __NR_utimensat
412 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
413           const struct timespec *,tsp,int,flags)
414 #else
415 static int sys_utimensat(int dirfd, const char *pathname,
416                          const struct timespec times[2], int flags)
417 {
418     errno = ENOSYS;
419     return -1;
420 }
421 #endif
422 #endif /* TARGET_NR_utimensat */
423 
424 #ifdef TARGET_NR_renameat2
425 #if defined(__NR_renameat2)
426 #define __NR_sys_renameat2 __NR_renameat2
427 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
428           const char *, new, unsigned int, flags)
429 #else
430 static int sys_renameat2(int oldfd, const char *old,
431                          int newfd, const char *new, int flags)
432 {
433     if (flags == 0) {
434         return renameat(oldfd, old, newfd, new);
435     }
436     errno = ENOSYS;
437     return -1;
438 }
439 #endif
440 #endif /* TARGET_NR_renameat2 */
441 
442 #ifdef CONFIG_INOTIFY
443 #include <sys/inotify.h>
444 
445 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
446 static int sys_inotify_init(void)
447 {
448   return (inotify_init());
449 }
450 #endif
451 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
452 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
453 {
454   return (inotify_add_watch(fd, pathname, mask));
455 }
456 #endif
457 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
458 static int sys_inotify_rm_watch(int fd, int32_t wd)
459 {
460   return (inotify_rm_watch(fd, wd));
461 }
462 #endif
463 #ifdef CONFIG_INOTIFY1
464 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
465 static int sys_inotify_init1(int flags)
466 {
467   return (inotify_init1(flags));
468 }
469 #endif
470 #endif
471 #else
472 /* Userspace can usually survive at runtime without inotify */
473 #undef TARGET_NR_inotify_init
474 #undef TARGET_NR_inotify_init1
475 #undef TARGET_NR_inotify_add_watch
476 #undef TARGET_NR_inotify_rm_watch
477 #endif /* CONFIG_INOTIFY  */
478 
479 #if defined(TARGET_NR_prlimit64)
480 #ifndef __NR_prlimit64
481 # define __NR_prlimit64 -1
482 #endif
483 #define __NR_sys_prlimit64 __NR_prlimit64
484 /* The glibc rlimit structure may not be the one used by the underlying syscall */
485 struct host_rlimit64 {
486     uint64_t rlim_cur;
487     uint64_t rlim_max;
488 };
489 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
490           const struct host_rlimit64 *, new_limit,
491           struct host_rlimit64 *, old_limit)
492 #endif
493 
494 
495 #if defined(TARGET_NR_timer_create)
496 /* Maximum of 32 active POSIX timers allowed at any one time. */
497 static timer_t g_posix_timers[32] = { 0, } ;
498 
499 static inline int next_free_host_timer(void)
500 {
501     int k ;
502     /* FIXME: Does finding the next free slot require a lock? */
503     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
504         if (g_posix_timers[k] == 0) {
505             g_posix_timers[k] = (timer_t) 1;
506             return k;
507         }
508     }
509     return -1;
510 }
511 #endif
512 
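/*
 * Errno translation: the host<->target mappings below are generated from
 * errnos.c.inc; any errno without an explicit E() entry is passed through
 * unchanged.  get_errno() is the usual way a host syscall result is
 * captured: -1 becomes the negated target errno, any other value is
 * returned as-is.
 */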
513 static inline int host_to_target_errno(int host_errno)
514 {
515     switch (host_errno) {
516 #define E(X)  case X: return TARGET_##X;
517 #include "errnos.c.inc"
518 #undef E
519     default:
520         return host_errno;
521     }
522 }
523 
524 static inline int target_to_host_errno(int target_errno)
525 {
526     switch (target_errno) {
527 #define E(X)  case TARGET_##X: return X;
528 #include "errnos.c.inc"
529 #undef E
530     default:
531         return target_errno;
532     }
533 }
534 
535 static inline abi_long get_errno(abi_long ret)
536 {
537     if (ret == -1)
538         return -host_to_target_errno(errno);
539     else
540         return ret;
541 }
542 
543 const char *target_strerror(int err)
544 {
545     if (err == TARGET_ERESTARTSYS) {
546         return "To be restarted";
547     }
548     if (err == TARGET_QEMU_ESIGRETURN) {
549         return "Successful exit from sigreturn";
550     }
551 
552     return strerror(target_to_host_errno(err));
553 }
554 
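/*
 * The safe_syscallN() wrappers below parallel the _syscallN() macros
 * above but route through safe_syscall(), so that a blocking host
 * syscall and guest signal delivery do not race: broadly, if a guest
 * signal is pending, the call fails with TARGET_ERESTARTSYS and the main
 * loop restarts it once the signal has been handled.  See the
 * safe_syscall() documentation in the linux-user headers for the exact
 * guarantee.
 */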
555 #define safe_syscall0(type, name) \
556 static type safe_##name(void) \
557 { \
558     return safe_syscall(__NR_##name); \
559 }
560 
561 #define safe_syscall1(type, name, type1, arg1) \
562 static type safe_##name(type1 arg1) \
563 { \
564     return safe_syscall(__NR_##name, arg1); \
565 }
566 
567 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
568 static type safe_##name(type1 arg1, type2 arg2) \
569 { \
570     return safe_syscall(__NR_##name, arg1, arg2); \
571 }
572 
573 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
574 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
575 { \
576     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
577 }
578 
579 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
580     type4, arg4) \
581 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
582 { \
583     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
584 }
585 
586 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
587     type4, arg4, type5, arg5) \
588 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
589     type5 arg5) \
590 { \
591     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
592 }
593 
594 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
595     type4, arg4, type5, arg5, type6, arg6) \
596 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
597     type5 arg5, type6 arg6) \
598 { \
599     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
600 }
601 
602 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
603 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
604 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
605               int, flags, mode_t, mode)
606 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
607 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
608               struct rusage *, rusage)
609 #endif
610 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
611               int, options, struct rusage *, rusage)
612 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
613 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
614     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
615 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
616               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
617 #endif
618 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
619 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
620               struct timespec *, tsp, const sigset_t *, sigmask,
621               size_t, sigsetsize)
622 #endif
623 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
624               int, maxevents, int, timeout, const sigset_t *, sigmask,
625               size_t, sigsetsize)
626 #if defined(__NR_futex)
627 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
628               const struct timespec *,timeout,int *,uaddr2,int,val3)
629 #endif
630 #if defined(__NR_futex_time64)
631 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
632               const struct timespec *,timeout,int *,uaddr2,int,val3)
633 #endif
634 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
635 safe_syscall2(int, kill, pid_t, pid, int, sig)
636 safe_syscall2(int, tkill, int, tid, int, sig)
637 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
638 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
639 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
640 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
641               unsigned long, pos_l, unsigned long, pos_h)
642 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
643               unsigned long, pos_l, unsigned long, pos_h)
644 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
645               socklen_t, addrlen)
646 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
647               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
648 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
649               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
650 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
651 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
652 safe_syscall2(int, flock, int, fd, int, operation)
653 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
654 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
655               const struct timespec *, uts, size_t, sigsetsize)
656 #endif
657 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
658               int, flags)
659 #if defined(TARGET_NR_nanosleep)
660 safe_syscall2(int, nanosleep, const struct timespec *, req,
661               struct timespec *, rem)
662 #endif
663 #if defined(TARGET_NR_clock_nanosleep) || \
664     defined(TARGET_NR_clock_nanosleep_time64)
665 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
666               const struct timespec *, req, struct timespec *, rem)
667 #endif
668 #ifdef __NR_ipc
669 #ifdef __s390x__
670 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
671               void *, ptr)
672 #else
673 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
674               void *, ptr, long, fifth)
675 #endif
676 #endif
677 #ifdef __NR_msgsnd
678 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
679               int, flags)
680 #endif
681 #ifdef __NR_msgrcv
682 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
683               long, msgtype, int, flags)
684 #endif
685 #ifdef __NR_semtimedop
686 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
687               unsigned, nsops, const struct timespec *, timeout)
688 #endif
689 #if defined(TARGET_NR_mq_timedsend) || \
690     defined(TARGET_NR_mq_timedsend_time64)
691 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
692               size_t, len, unsigned, prio, const struct timespec *, timeout)
693 #endif
694 #if defined(TARGET_NR_mq_timedreceive) || \
695     defined(TARGET_NR_mq_timedreceive_time64)
696 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
697               size_t, len, unsigned *, prio, const struct timespec *, timeout)
698 #endif
699 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
700 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
701               int, outfd, loff_t *, poutoff, size_t, length,
702               unsigned int, flags)
703 #endif
704 
705 /* We do ioctl like this rather than via safe_syscall3 to preserve the
706  * "third argument might be integer or pointer or not present" behaviour of
707  * the libc function.
708  */
709 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
710 /* Similarly for fcntl. Note that callers must always:
711  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
712  *  use the flock64 struct rather than unsuffixed flock
713  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
714  */
715 #ifdef __NR_fcntl64
716 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
717 #else
718 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
719 #endif
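
/*
 * Example: per the note above, lock requests always go through the
 * 64-bit interface, e.g.
 *     struct flock64 fl64 = { .l_type = F_RDLCK, .l_whence = SEEK_SET };
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 * which keeps file offsets 64-bit on both 32-bit and 64-bit hosts.
 */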
720 
721 static inline int host_to_target_sock_type(int host_type)
722 {
723     int target_type;
724 
725     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
726     case SOCK_DGRAM:
727         target_type = TARGET_SOCK_DGRAM;
728         break;
729     case SOCK_STREAM:
730         target_type = TARGET_SOCK_STREAM;
731         break;
732     default:
733         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
734         break;
735     }
736 
737 #if defined(SOCK_CLOEXEC)
738     if (host_type & SOCK_CLOEXEC) {
739         target_type |= TARGET_SOCK_CLOEXEC;
740     }
741 #endif
742 
743 #if defined(SOCK_NONBLOCK)
744     if (host_type & SOCK_NONBLOCK) {
745         target_type |= TARGET_SOCK_NONBLOCK;
746     }
747 #endif
748 
749     return target_type;
750 }
751 
752 static abi_ulong target_brk;
753 static abi_ulong target_original_brk;
754 static abi_ulong brk_page;
755 
756 void target_set_brk(abi_ulong new_brk)
757 {
758     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
759     brk_page = HOST_PAGE_ALIGN(target_brk);
760 }
761 
762 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
763 #define DEBUGF_BRK(message, args...)
764 
765 /* do_brk() must return target values and target errnos. */
766 abi_long do_brk(abi_ulong new_brk)
767 {
768     abi_long mapped_addr;
769     abi_ulong new_alloc_size;
770 
771     /* brk pointers are always untagged */
772 
773     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
774 
775     if (!new_brk) {
776         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
777         return target_brk;
778     }
779     if (new_brk < target_original_brk) {
780         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
781                    target_brk);
782         return target_brk;
783     }
784 
785     /* If the new brk does not go beyond the highest page reserved to the
786      * target heap allocation, set it and we're almost done...  */
787     if (new_brk <= brk_page) {
788         /* Heap contents are initialized to zero, as for anonymous
789          * mapped pages.  */
790         if (new_brk > target_brk) {
791             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
792         }
793         target_brk = new_brk;
794         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
795         return target_brk;
796     }
797 
798     /* We need to allocate more memory after the brk... Note that
799      * we don't use MAP_FIXED because that will map over the top of
800      * any existing mapping (like the one with the host libc or qemu
801      * itself); instead we treat "mapped but at wrong address" as
802      * a failure and unmap again.
803      */
804     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
805     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
806                                         PROT_READ|PROT_WRITE,
807                                         MAP_ANON|MAP_PRIVATE, 0, 0));
808 
809     if (mapped_addr == brk_page) {
810         /* Heap contents are initialized to zero, as for anonymous
811          * mapped pages.  Technically the new pages are already
812          * initialized to zero since they *are* anonymous mapped
813          * pages; however, we have to take care with the contents that
814          * come from the remaining part of the previous page: it may
815          * contain garbage data due to previous heap usage (grown
816          * then shrunk).  */
817         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
818 
819         target_brk = new_brk;
820         brk_page = HOST_PAGE_ALIGN(target_brk);
821         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
822             target_brk);
823         return target_brk;
824     } else if (mapped_addr != -1) {
825         /* Mapped but at wrong address, meaning there wasn't actually
826          * enough space for this brk.
827          */
828         target_munmap(mapped_addr, new_alloc_size);
829         mapped_addr = -1;
830         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
831     } else {
832         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
833     }
835 
836 #if defined(TARGET_ALPHA)
837     /* We (partially) emulate OSF/1 on Alpha, which requires we
838        return a proper errno, not an unchanged brk value.  */
839     return -TARGET_ENOMEM;
840 #endif
841     /* For everything else, return the previous break. */
842     return target_brk;
843 }
844 
845 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
846     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
847 static inline abi_long copy_from_user_fdset(fd_set *fds,
848                                             abi_ulong target_fds_addr,
849                                             int n)
850 {
851     int i, nw, j, k;
852     abi_ulong b, *target_fds;
853 
854     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
855     if (!(target_fds = lock_user(VERIFY_READ,
856                                  target_fds_addr,
857                                  sizeof(abi_ulong) * nw,
858                                  1)))
859         return -TARGET_EFAULT;
860 
861     FD_ZERO(fds);
862     k = 0;
863     for (i = 0; i < nw; i++) {
864         /* grab the abi_ulong */
865         __get_user(b, &target_fds[i]);
866         for (j = 0; j < TARGET_ABI_BITS; j++) {
867             /* check the bit inside the abi_ulong */
868             if ((b >> j) & 1)
869                 FD_SET(k, fds);
870             k++;
871         }
872     }
873 
874     unlock_user(target_fds, target_fds_addr, 0);
875 
876     return 0;
877 }
878 
879 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
880                                                  abi_ulong target_fds_addr,
881                                                  int n)
882 {
883     if (target_fds_addr) {
884         if (copy_from_user_fdset(fds, target_fds_addr, n))
885             return -TARGET_EFAULT;
886         *fds_ptr = fds;
887     } else {
888         *fds_ptr = NULL;
889     }
890     return 0;
891 }
892 
893 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
894                                           const fd_set *fds,
895                                           int n)
896 {
897     int i, nw, j, k;
898     abi_long v;
899     abi_ulong *target_fds;
900 
901     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
902     if (!(target_fds = lock_user(VERIFY_WRITE,
903                                  target_fds_addr,
904                                  sizeof(abi_ulong) * nw,
905                                  0)))
906         return -TARGET_EFAULT;
907 
908     k = 0;
909     for (i = 0; i < nw; i++) {
910         v = 0;
911         for (j = 0; j < TARGET_ABI_BITS; j++) {
912             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
913             k++;
914         }
915         __put_user(v, &target_fds[i]);
916     }
917 
918     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
919 
920     return 0;
921 }
922 #endif
923 
924 #if defined(__alpha__)
925 #define HOST_HZ 1024
926 #else
927 #define HOST_HZ 100
928 #endif
929 
930 static inline abi_long host_to_target_clock_t(long ticks)
931 {
932 #if HOST_HZ == TARGET_HZ
933     return ticks;
934 #else
935     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
936 #endif
937 }
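
/*
 * Worked example: with HOST_HZ == 100 and a target whose TARGET_HZ is
 * 1024, a host clock_t value of 250 ticks (2.5 seconds) converts to
 * (250 * 1024) / 100 = 2560 target ticks, preserving the elapsed time.
 */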
938 
939 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
940                                              const struct rusage *rusage)
941 {
942     struct target_rusage *target_rusage;
943 
944     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
945         return -TARGET_EFAULT;
946     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
947     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
948     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
949     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
950     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
951     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
952     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
953     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
954     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
955     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
956     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
957     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
958     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
959     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
960     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
961     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
962     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
963     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
964     unlock_user_struct(target_rusage, target_addr, 1);
965 
966     return 0;
967 }
968 
969 #ifdef TARGET_NR_setrlimit
970 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
971 {
972     abi_ulong target_rlim_swap;
973     rlim_t result;
974 
975     target_rlim_swap = tswapal(target_rlim);
976     if (target_rlim_swap == TARGET_RLIM_INFINITY)
977         return RLIM_INFINITY;
978 
979     result = target_rlim_swap;
980     if (target_rlim_swap != (rlim_t)result)
981         return RLIM_INFINITY;
982 
983     return result;
984 }
985 #endif
986 
987 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
988 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
989 {
990     abi_ulong target_rlim_swap;
991     abi_ulong result;
992 
993     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
994         target_rlim_swap = TARGET_RLIM_INFINITY;
995     else
996         target_rlim_swap = rlim;
997     result = tswapal(target_rlim_swap);
998 
999     return result;
1000 }
1001 #endif
1002 
1003 static inline int target_to_host_resource(int code)
1004 {
1005     switch (code) {
1006     case TARGET_RLIMIT_AS:
1007         return RLIMIT_AS;
1008     case TARGET_RLIMIT_CORE:
1009         return RLIMIT_CORE;
1010     case TARGET_RLIMIT_CPU:
1011         return RLIMIT_CPU;
1012     case TARGET_RLIMIT_DATA:
1013         return RLIMIT_DATA;
1014     case TARGET_RLIMIT_FSIZE:
1015         return RLIMIT_FSIZE;
1016     case TARGET_RLIMIT_LOCKS:
1017         return RLIMIT_LOCKS;
1018     case TARGET_RLIMIT_MEMLOCK:
1019         return RLIMIT_MEMLOCK;
1020     case TARGET_RLIMIT_MSGQUEUE:
1021         return RLIMIT_MSGQUEUE;
1022     case TARGET_RLIMIT_NICE:
1023         return RLIMIT_NICE;
1024     case TARGET_RLIMIT_NOFILE:
1025         return RLIMIT_NOFILE;
1026     case TARGET_RLIMIT_NPROC:
1027         return RLIMIT_NPROC;
1028     case TARGET_RLIMIT_RSS:
1029         return RLIMIT_RSS;
1030     case TARGET_RLIMIT_RTPRIO:
1031         return RLIMIT_RTPRIO;
1032     case TARGET_RLIMIT_SIGPENDING:
1033         return RLIMIT_SIGPENDING;
1034     case TARGET_RLIMIT_STACK:
1035         return RLIMIT_STACK;
1036     default:
1037         return code;
1038     }
1039 }
1040 
1041 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1042                                               abi_ulong target_tv_addr)
1043 {
1044     struct target_timeval *target_tv;
1045 
1046     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1047         return -TARGET_EFAULT;
1048     }
1049 
1050     __get_user(tv->tv_sec, &target_tv->tv_sec);
1051     __get_user(tv->tv_usec, &target_tv->tv_usec);
1052 
1053     unlock_user_struct(target_tv, target_tv_addr, 0);
1054 
1055     return 0;
1056 }
1057 
1058 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1059                                             const struct timeval *tv)
1060 {
1061     struct target_timeval *target_tv;
1062 
1063     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1064         return -TARGET_EFAULT;
1065     }
1066 
1067     __put_user(tv->tv_sec, &target_tv->tv_sec);
1068     __put_user(tv->tv_usec, &target_tv->tv_usec);
1069 
1070     unlock_user_struct(target_tv, target_tv_addr, 1);
1071 
1072     return 0;
1073 }
1074 
1075 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1076 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1077                                                 abi_ulong target_tv_addr)
1078 {
1079     struct target__kernel_sock_timeval *target_tv;
1080 
1081     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1082         return -TARGET_EFAULT;
1083     }
1084 
1085     __get_user(tv->tv_sec, &target_tv->tv_sec);
1086     __get_user(tv->tv_usec, &target_tv->tv_usec);
1087 
1088     unlock_user_struct(target_tv, target_tv_addr, 0);
1089 
1090     return 0;
1091 }
1092 #endif
1093 
1094 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1095                                               const struct timeval *tv)
1096 {
1097     struct target__kernel_sock_timeval *target_tv;
1098 
1099     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1100         return -TARGET_EFAULT;
1101     }
1102 
1103     __put_user(tv->tv_sec, &target_tv->tv_sec);
1104     __put_user(tv->tv_usec, &target_tv->tv_usec);
1105 
1106     unlock_user_struct(target_tv, target_tv_addr, 1);
1107 
1108     return 0;
1109 }
1110 
1111 #if defined(TARGET_NR_futex) || \
1112     defined(TARGET_NR_rt_sigtimedwait) || \
1113     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1114     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1115     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1116     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1117     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1118     defined(TARGET_NR_timer_settime) || \
1119     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1120 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1121                                                abi_ulong target_addr)
1122 {
1123     struct target_timespec *target_ts;
1124 
1125     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1126         return -TARGET_EFAULT;
1127     }
1128     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1129     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1130     unlock_user_struct(target_ts, target_addr, 0);
1131     return 0;
1132 }
1133 #endif
1134 
1135 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1136     defined(TARGET_NR_timer_settime64) || \
1137     defined(TARGET_NR_mq_timedsend_time64) || \
1138     defined(TARGET_NR_mq_timedreceive_time64) || \
1139     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1140     defined(TARGET_NR_clock_nanosleep_time64) || \
1141     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1142     defined(TARGET_NR_utimensat) || \
1143     defined(TARGET_NR_utimensat_time64) || \
1144     defined(TARGET_NR_semtimedop_time64) || \
1145     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1146 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1147                                                  abi_ulong target_addr)
1148 {
1149     struct target__kernel_timespec *target_ts;
1150 
1151     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1152         return -TARGET_EFAULT;
1153     }
1154     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1155     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1156     /* In 32-bit mode, this drops the padding held in the upper half of tv_nsec */
1157     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1158     unlock_user_struct(target_ts, target_addr, 0);
1159     return 0;
1160 }
1161 #endif
1162 
1163 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1164                                                struct timespec *host_ts)
1165 {
1166     struct target_timespec *target_ts;
1167 
1168     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1169         return -TARGET_EFAULT;
1170     }
1171     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1172     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1173     unlock_user_struct(target_ts, target_addr, 1);
1174     return 0;
1175 }
1176 
1177 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1178                                                  struct timespec *host_ts)
1179 {
1180     struct target__kernel_timespec *target_ts;
1181 
1182     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1183         return -TARGET_EFAULT;
1184     }
1185     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1186     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1187     unlock_user_struct(target_ts, target_addr, 1);
1188     return 0;
1189 }
1190 
1191 #if defined(TARGET_NR_gettimeofday)
1192 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1193                                              struct timezone *tz)
1194 {
1195     struct target_timezone *target_tz;
1196 
1197     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1198         return -TARGET_EFAULT;
1199     }
1200 
1201     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1202     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1203 
1204     unlock_user_struct(target_tz, target_tz_addr, 1);
1205 
1206     return 0;
1207 }
1208 #endif
1209 
1210 #if defined(TARGET_NR_settimeofday)
1211 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1212                                                abi_ulong target_tz_addr)
1213 {
1214     struct target_timezone *target_tz;
1215 
1216     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1217         return -TARGET_EFAULT;
1218     }
1219 
1220     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1221     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1222 
1223     unlock_user_struct(target_tz, target_tz_addr, 0);
1224 
1225     return 0;
1226 }
1227 #endif
1228 
1229 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1230 #include <mqueue.h>
1231 
1232 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1233                                               abi_ulong target_mq_attr_addr)
1234 {
1235     struct target_mq_attr *target_mq_attr;
1236 
1237     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1238                           target_mq_attr_addr, 1))
1239         return -TARGET_EFAULT;
1240 
1241     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1242     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1243     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1244     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1245 
1246     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1247 
1248     return 0;
1249 }
1250 
1251 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1252                                             const struct mq_attr *attr)
1253 {
1254     struct target_mq_attr *target_mq_attr;
1255 
1256     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1257                           target_mq_attr_addr, 0))
1258         return -TARGET_EFAULT;
1259 
1260     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1261     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1262     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1263     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1264 
1265     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1266 
1267     return 0;
1268 }
1269 #endif
1270 
1271 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1272 /* do_select() must return target values and target errnos. */
1273 static abi_long do_select(int n,
1274                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1275                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1276 {
1277     fd_set rfds, wfds, efds;
1278     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1279     struct timeval tv;
1280     struct timespec ts, *ts_ptr;
1281     abi_long ret;
1282 
1283     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1284     if (ret) {
1285         return ret;
1286     }
1287     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1288     if (ret) {
1289         return ret;
1290     }
1291     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1292     if (ret) {
1293         return ret;
1294     }
1295 
1296     if (target_tv_addr) {
1297         if (copy_from_user_timeval(&tv, target_tv_addr))
1298             return -TARGET_EFAULT;
1299         ts.tv_sec = tv.tv_sec;
1300         ts.tv_nsec = tv.tv_usec * 1000;
1301         ts_ptr = &ts;
1302     } else {
1303         ts_ptr = NULL;
1304     }
1305 
1306     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1307                                   ts_ptr, NULL));
1308 
1309     if (!is_error(ret)) {
1310         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1311             return -TARGET_EFAULT;
1312         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1313             return -TARGET_EFAULT;
1314         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1315             return -TARGET_EFAULT;
1316 
1317         if (target_tv_addr) {
1318             tv.tv_sec = ts.tv_sec;
1319             tv.tv_usec = ts.tv_nsec / 1000;
1320             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1321                 return -TARGET_EFAULT;
1322             }
1323         }
1324     }
1325 
1326     return ret;
1327 }
1328 
1329 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1330 static abi_long do_old_select(abi_ulong arg1)
1331 {
1332     struct target_sel_arg_struct *sel;
1333     abi_ulong inp, outp, exp, tvp;
1334     long nsel;
1335 
1336     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1337         return -TARGET_EFAULT;
1338     }
1339 
1340     nsel = tswapal(sel->n);
1341     inp = tswapal(sel->inp);
1342     outp = tswapal(sel->outp);
1343     exp = tswapal(sel->exp);
1344     tvp = tswapal(sel->tvp);
1345 
1346     unlock_user_struct(sel, arg1, 0);
1347 
1348     return do_select(nsel, inp, outp, exp, tvp);
1349 }
1350 #endif
1351 #endif
1352 
1353 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1354 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1355                             abi_long arg4, abi_long arg5, abi_long arg6,
1356                             bool time64)
1357 {
1358     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1359     fd_set rfds, wfds, efds;
1360     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1361     struct timespec ts, *ts_ptr;
1362     abi_long ret;
1363 
1364     /*
1365      * The 6th arg is actually two args smashed together,
1366      * so we cannot use the C library.
1367      */
1368     sigset_t set;
1369     struct {
1370         sigset_t *set;
1371         size_t size;
1372     } sig, *sig_ptr;
1373 
1374     abi_ulong arg_sigset, arg_sigsize, *arg7;
1375     target_sigset_t *target_sigset;
1376 
1377     n = arg1;
1378     rfd_addr = arg2;
1379     wfd_addr = arg3;
1380     efd_addr = arg4;
1381     ts_addr = arg5;
1382 
1383     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1384     if (ret) {
1385         return ret;
1386     }
1387     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1388     if (ret) {
1389         return ret;
1390     }
1391     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1392     if (ret) {
1393         return ret;
1394     }
1395 
1396     /*
1397      * This takes a timespec, and not a timeval, so we cannot
1398      * use the do_select() helper ...
1399      */
1400     if (ts_addr) {
1401         if (time64) {
1402             if (target_to_host_timespec64(&ts, ts_addr)) {
1403                 return -TARGET_EFAULT;
1404             }
1405         } else {
1406             if (target_to_host_timespec(&ts, ts_addr)) {
1407                 return -TARGET_EFAULT;
1408             }
1409         }
1410         ts_ptr = &ts;
1411     } else {
1412         ts_ptr = NULL;
1413     }
1414 
1415     /* Extract the two packed args for the sigset */
1416     if (arg6) {
1417         sig_ptr = &sig;
1418         sig.size = SIGSET_T_SIZE;
1419 
1420         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1421         if (!arg7) {
1422             return -TARGET_EFAULT;
1423         }
1424         arg_sigset = tswapal(arg7[0]);
1425         arg_sigsize = tswapal(arg7[1]);
1426         unlock_user(arg7, arg6, 0);
1427 
1428         if (arg_sigset) {
1429             sig.set = &set;
1430             if (arg_sigsize != sizeof(*target_sigset)) {
1431                 /* Like the kernel, we enforce correct size sigsets */
1432                 return -TARGET_EINVAL;
1433             }
1434             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1435                                       sizeof(*target_sigset), 1);
1436             if (!target_sigset) {
1437                 return -TARGET_EFAULT;
1438             }
1439             target_to_host_sigset(&set, target_sigset);
1440             unlock_user(target_sigset, arg_sigset, 0);
1441         } else {
1442             sig.set = NULL;
1443         }
1444     } else {
1445         sig_ptr = NULL;
1446     }
1447 
1448     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1449                                   ts_ptr, sig_ptr));
1450 
1451     if (!is_error(ret)) {
1452         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1453             return -TARGET_EFAULT;
1454         }
1455         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1456             return -TARGET_EFAULT;
1457         }
1458         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1459             return -TARGET_EFAULT;
1460         }
1461         if (time64) {
1462             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1463                 return -TARGET_EFAULT;
1464             }
1465         } else {
1466             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1467                 return -TARGET_EFAULT;
1468             }
1469         }
1470     }
1471     return ret;
1472 }
1473 #endif
1474 
1475 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1476     defined(TARGET_NR_ppoll_time64)
1477 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1478                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1479 {
1480     struct target_pollfd *target_pfd;
1481     unsigned int nfds = arg2;
1482     struct pollfd *pfd;
1483     unsigned int i;
1484     abi_long ret;
1485 
1486     pfd = NULL;
1487     target_pfd = NULL;
1488     if (nfds) {
1489         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1490             return -TARGET_EINVAL;
1491         }
1492         target_pfd = lock_user(VERIFY_WRITE, arg1,
1493                                sizeof(struct target_pollfd) * nfds, 1);
1494         if (!target_pfd) {
1495             return -TARGET_EFAULT;
1496         }
1497 
1498         pfd = alloca(sizeof(struct pollfd) * nfds);
1499         for (i = 0; i < nfds; i++) {
1500             pfd[i].fd = tswap32(target_pfd[i].fd);
1501             pfd[i].events = tswap16(target_pfd[i].events);
1502         }
1503     }
1504     if (ppoll) {
1505         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1506         target_sigset_t *target_set;
1507         sigset_t _set, *set = &_set;
1508 
1509         if (arg3) {
1510             if (time64) {
1511                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1512                     unlock_user(target_pfd, arg1, 0);
1513                     return -TARGET_EFAULT;
1514                 }
1515             } else {
1516                 if (target_to_host_timespec(timeout_ts, arg3)) {
1517                     unlock_user(target_pfd, arg1, 0);
1518                     return -TARGET_EFAULT;
1519                 }
1520             }
1521         } else {
1522             timeout_ts = NULL;
1523         }
1524 
1525         if (arg4) {
1526             if (arg5 != sizeof(target_sigset_t)) {
1527                 unlock_user(target_pfd, arg1, 0);
1528                 return -TARGET_EINVAL;
1529             }
1530 
1531             target_set = lock_user(VERIFY_READ, arg4,
1532                                    sizeof(target_sigset_t), 1);
1533             if (!target_set) {
1534                 unlock_user(target_pfd, arg1, 0);
1535                 return -TARGET_EFAULT;
1536             }
1537             target_to_host_sigset(set, target_set);
1538         } else {
1539             set = NULL;
1540         }
1541 
1542         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1543                                    set, SIGSET_T_SIZE));
1544 
1545         if (!is_error(ret) && arg3) {
1546             if (time64) {
1547                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1548                     return -TARGET_EFAULT;
1549                 }
1550             } else {
1551                 if (host_to_target_timespec(arg3, timeout_ts)) {
1552                     return -TARGET_EFAULT;
1553                 }
1554             }
1555         }
1556         if (arg4) {
1557             unlock_user(target_set, arg4, 0);
1558         }
1559     } else {
1560         struct timespec ts, *pts;
1561 
1562         if (arg3 >= 0) {
1563             /* Convert ms to secs, ns */
1564             ts.tv_sec = arg3 / 1000;
1565             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1566             pts = &ts;
1567         } else {
1568             /* A negative poll() timeout means "infinite" */
1569             pts = NULL;
1570         }
1571         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1572     }
1573 
1574     if (!is_error(ret)) {
1575         for (i = 0; i < nfds; i++) {
1576             target_pfd[i].revents = tswap16(pfd[i].revents);
1577         }
1578     }
1579     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1580     return ret;
1581 }
1582 #endif
1583 
1584 static abi_long do_pipe2(int host_pipe[], int flags)
1585 {
1586 #ifdef CONFIG_PIPE2
1587     return pipe2(host_pipe, flags);
1588 #else
1589     return -ENOSYS;
1590 #endif
1591 }
1592 
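/*
 * Backs both pipe and pipe2: pipedes is the guest address of the two-element
 * fd array; is_pipe2 selects whether the per-target legacy return convention
 * below is skipped.
 */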
1593 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1594                         int flags, int is_pipe2)
1595 {
1596     int host_pipe[2];
1597     abi_long ret;
1598     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1599 
1600     if (is_error(ret))
1601         return get_errno(ret);
1602 
1603     /* Several targets have special calling conventions for the original
1604        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1605     if (!is_pipe2) {
1606 #if defined(TARGET_ALPHA)
1607         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1608         return host_pipe[0];
1609 #elif defined(TARGET_MIPS)
1610         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1611         return host_pipe[0];
1612 #elif defined(TARGET_SH4)
1613         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1614         return host_pipe[0];
1615 #elif defined(TARGET_SPARC)
1616         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1617         return host_pipe[0];
1618 #endif
1619     }
1620 
1621     if (put_user_s32(host_pipe[0], pipedes)
1622         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1623         return -TARGET_EFAULT;
1624     return get_errno(ret);
1625 }
1626 
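/*
 * Convert a guest struct ip_mreq or ip_mreqn at target_addr into a host
 * struct ip_mreqn; imr_ifindex is only read (and byte-swapped) when the
 * guest passed the larger ip_mreqn layout.
 */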
1627 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1628                                               abi_ulong target_addr,
1629                                               socklen_t len)
1630 {
1631     struct target_ip_mreqn *target_smreqn;
1632 
1633     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1634     if (!target_smreqn)
1635         return -TARGET_EFAULT;
1636     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1637     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1638     if (len == sizeof(struct target_ip_mreqn))
1639         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1640     unlock_user(target_smreqn, target_addr, 0);
1641 
1642     return 0;
1643 }
1644 
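/*
 * Copy a guest sockaddr into host format, byte-swapping the address family
 * and the netlink/packet-specific fields, and fixing up unterminated
 * AF_UNIX sun_path lengths as described below.
 */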
1645 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1646                                                abi_ulong target_addr,
1647                                                socklen_t len)
1648 {
1649     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1650     sa_family_t sa_family;
1651     struct target_sockaddr *target_saddr;
1652 
1653     if (fd_trans_target_to_host_addr(fd)) {
1654         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1655     }
1656 
1657     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1658     if (!target_saddr)
1659         return -TARGET_EFAULT;
1660 
1661     sa_family = tswap16(target_saddr->sa_family);
1662 
1663     /* The caller might send an incomplete sun_path; sun_path
1664      * must be terminated by \0 (see the manual page), but
1665      * unfortunately it is quite common to specify the sockaddr_un
1666      * length as "strlen(x->sun_path)" when it should be
1667      * "strlen(...) + 1". We fix that here if needed.
1668      * The Linux kernel has a similar workaround.
1669      */
1670 
1671     if (sa_family == AF_UNIX) {
1672         if (len < unix_maxlen && len > 0) {
1673             char *cp = (char *)target_saddr;
1674 
1675             if (cp[len - 1] && !cp[len])
1676                 len++;
1677         }
1678         if (len > unix_maxlen)
1679             len = unix_maxlen;
1680     }
1681 
1682     memcpy(addr, target_saddr, len);
1683     addr->sa_family = sa_family;
1684     if (sa_family == AF_NETLINK) {
1685         struct sockaddr_nl *nladdr;
1686 
1687         nladdr = (struct sockaddr_nl *)addr;
1688         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1689         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1690     } else if (sa_family == AF_PACKET) {
1691         struct target_sockaddr_ll *lladdr;
1692 
1693         lladdr = (struct target_sockaddr_ll *)addr;
1694         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1695         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1696     }
1697     unlock_user(target_saddr, target_addr, 0);
1698 
1699     return 0;
1700 }
1701 
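/*
 * Copy a host sockaddr back to guest memory, byte-swapping the address
 * family and the netlink, packet and IPv6 fields whose endianness matters.
 */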
1702 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1703                                                struct sockaddr *addr,
1704                                                socklen_t len)
1705 {
1706     struct target_sockaddr *target_saddr;
1707 
1708     if (len == 0) {
1709         return 0;
1710     }
1711     assert(addr);
1712 
1713     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1714     if (!target_saddr)
1715         return -TARGET_EFAULT;
1716     memcpy(target_saddr, addr, len);
1717     if (len >= offsetof(struct target_sockaddr, sa_family) +
1718         sizeof(target_saddr->sa_family)) {
1719         target_saddr->sa_family = tswap16(addr->sa_family);
1720     }
1721     if (addr->sa_family == AF_NETLINK &&
1722         len >= sizeof(struct target_sockaddr_nl)) {
1723         struct target_sockaddr_nl *target_nl =
1724                (struct target_sockaddr_nl *)target_saddr;
1725         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1726         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1727     } else if (addr->sa_family == AF_PACKET) {
1728         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1729         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1730         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1731     } else if (addr->sa_family == AF_INET6 &&
1732                len >= sizeof(struct target_sockaddr_in6)) {
1733         struct target_sockaddr_in6 *target_in6 =
1734                (struct target_sockaddr_in6 *)target_saddr;
1735         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1736     }
1737     unlock_user(target_saddr, target_addr, len);
1738 
1739     return 0;
1740 }
1741 
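/*
 * Convert the guest control-message (cmsg) chain attached to target_msgh
 * into host format in msgh; SCM_RIGHTS and SCM_CREDENTIALS payloads are
 * converted field by field, anything else is copied through unchanged.
 */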
1742 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1743                                            struct target_msghdr *target_msgh)
1744 {
1745     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1746     abi_long msg_controllen;
1747     abi_ulong target_cmsg_addr;
1748     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1749     socklen_t space = 0;
1750 
1751     msg_controllen = tswapal(target_msgh->msg_controllen);
1752     if (msg_controllen < sizeof (struct target_cmsghdr))
1753         goto the_end;
1754     target_cmsg_addr = tswapal(target_msgh->msg_control);
1755     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1756     target_cmsg_start = target_cmsg;
1757     if (!target_cmsg)
1758         return -TARGET_EFAULT;
1759 
1760     while (cmsg && target_cmsg) {
1761         void *data = CMSG_DATA(cmsg);
1762         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1763 
1764         int len = tswapal(target_cmsg->cmsg_len)
1765             - sizeof(struct target_cmsghdr);
1766 
1767         space += CMSG_SPACE(len);
1768         if (space > msgh->msg_controllen) {
1769             space -= CMSG_SPACE(len);
1770             /* This is a QEMU bug, since we allocated the payload
1771              * area ourselves (unlike overflow in host-to-target
1772              * conversion, which is just the guest giving us a buffer
1773              * that's too small). It can't happen for the payload types
1774              * we currently support; if it becomes an issue in future
1775              * we would need to improve our allocation strategy to
1776              * something more intelligent than "twice the size of the
1777              * target buffer we're reading from".
1778              */
1779             qemu_log_mask(LOG_UNIMP,
1780                           ("Unsupported ancillary data %d/%d: "
1781                            "unhandled msg size\n"),
1782                           tswap32(target_cmsg->cmsg_level),
1783                           tswap32(target_cmsg->cmsg_type));
1784             break;
1785         }
1786 
1787         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1788             cmsg->cmsg_level = SOL_SOCKET;
1789         } else {
1790             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1791         }
1792         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1793         cmsg->cmsg_len = CMSG_LEN(len);
1794 
1795         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1796             int *fd = (int *)data;
1797             int *target_fd = (int *)target_data;
1798             int i, numfds = len / sizeof(int);
1799 
1800             for (i = 0; i < numfds; i++) {
1801                 __get_user(fd[i], target_fd + i);
1802             }
1803         } else if (cmsg->cmsg_level == SOL_SOCKET
1804                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1805             struct ucred *cred = (struct ucred *)data;
1806             struct target_ucred *target_cred =
1807                 (struct target_ucred *)target_data;
1808 
1809             __get_user(cred->pid, &target_cred->pid);
1810             __get_user(cred->uid, &target_cred->uid);
1811             __get_user(cred->gid, &target_cred->gid);
1812         } else {
1813             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1814                           cmsg->cmsg_level, cmsg->cmsg_type);
1815             memcpy(data, target_data, len);
1816         }
1817 
1818         cmsg = CMSG_NXTHDR(msgh, cmsg);
1819         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1820                                          target_cmsg_start);
1821     }
1822     unlock_user(target_cmsg, target_cmsg_addr, 0);
1823  the_end:
1824     msgh->msg_controllen = space;
1825     return 0;
1826 }
1827 
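/*
 * Convert a host control-message chain back into the guest's buffer,
 * adjusting payload sizes where the target layout differs (for example
 * SO_TIMESTAMP) and reporting truncation via MSG_CTRUNC.
 */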
1828 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1829                                            struct msghdr *msgh)
1830 {
1831     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1832     abi_long msg_controllen;
1833     abi_ulong target_cmsg_addr;
1834     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1835     socklen_t space = 0;
1836 
1837     msg_controllen = tswapal(target_msgh->msg_controllen);
1838     if (msg_controllen < sizeof (struct target_cmsghdr))
1839         goto the_end;
1840     target_cmsg_addr = tswapal(target_msgh->msg_control);
1841     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1842     target_cmsg_start = target_cmsg;
1843     if (!target_cmsg)
1844         return -TARGET_EFAULT;
1845 
1846     while (cmsg && target_cmsg) {
1847         void *data = CMSG_DATA(cmsg);
1848         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1849 
1850         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1851         int tgt_len, tgt_space;
1852 
1853         /* We never copy a half-header but may copy half-data;
1854          * this is Linux's behaviour in put_cmsg(). Note that
1855          * truncation here is a guest problem (which we report
1856          * to the guest via the CTRUNC bit), unlike truncation
1857          * in target_to_host_cmsg, which is a QEMU bug.
1858          */
1859         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1860             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1861             break;
1862         }
1863 
1864         if (cmsg->cmsg_level == SOL_SOCKET) {
1865             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1866         } else {
1867             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1868         }
1869         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1870 
1871         /* Payload types which need a different size of payload on
1872          * the target must adjust tgt_len here.
1873          */
1874         tgt_len = len;
1875         switch (cmsg->cmsg_level) {
1876         case SOL_SOCKET:
1877             switch (cmsg->cmsg_type) {
1878             case SO_TIMESTAMP:
1879                 tgt_len = sizeof(struct target_timeval);
1880                 break;
1881             default:
1882                 break;
1883             }
1884             break;
1885         default:
1886             break;
1887         }
1888 
1889         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1890             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1891             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1892         }
1893 
1894         /* We must now copy-and-convert len bytes of payload
1895          * into tgt_len bytes of destination space. Bear in mind
1896          * that in both source and destination we may be dealing
1897          * with a truncated value!
1898          */
1899         switch (cmsg->cmsg_level) {
1900         case SOL_SOCKET:
1901             switch (cmsg->cmsg_type) {
1902             case SCM_RIGHTS:
1903             {
1904                 int *fd = (int *)data;
1905                 int *target_fd = (int *)target_data;
1906                 int i, numfds = tgt_len / sizeof(int);
1907 
1908                 for (i = 0; i < numfds; i++) {
1909                     __put_user(fd[i], target_fd + i);
1910                 }
1911                 break;
1912             }
1913             case SO_TIMESTAMP:
1914             {
1915                 struct timeval *tv = (struct timeval *)data;
1916                 struct target_timeval *target_tv =
1917                     (struct target_timeval *)target_data;
1918 
1919                 if (len != sizeof(struct timeval) ||
1920                     tgt_len != sizeof(struct target_timeval)) {
1921                     goto unimplemented;
1922                 }
1923 
1924                 /* copy struct timeval to target */
1925                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1926                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1927                 break;
1928             }
1929             case SCM_CREDENTIALS:
1930             {
1931                 struct ucred *cred = (struct ucred *)data;
1932                 struct target_ucred *target_cred =
1933                     (struct target_ucred *)target_data;
1934 
1935                 __put_user(cred->pid, &target_cred->pid);
1936                 __put_user(cred->uid, &target_cred->uid);
1937                 __put_user(cred->gid, &target_cred->gid);
1938                 break;
1939             }
1940             default:
1941                 goto unimplemented;
1942             }
1943             break;
1944 
1945         case SOL_IP:
1946             switch (cmsg->cmsg_type) {
1947             case IP_TTL:
1948             {
1949                 uint32_t *v = (uint32_t *)data;
1950                 uint32_t *t_int = (uint32_t *)target_data;
1951 
1952                 if (len != sizeof(uint32_t) ||
1953                     tgt_len != sizeof(uint32_t)) {
1954                     goto unimplemented;
1955                 }
1956                 __put_user(*v, t_int);
1957                 break;
1958             }
1959             case IP_RECVERR:
1960             {
1961                 struct errhdr_t {
1962                    struct sock_extended_err ee;
1963                    struct sockaddr_in offender;
1964                 };
1965                 struct errhdr_t *errh = (struct errhdr_t *)data;
1966                 struct errhdr_t *target_errh =
1967                     (struct errhdr_t *)target_data;
1968 
1969                 if (len != sizeof(struct errhdr_t) ||
1970                     tgt_len != sizeof(struct errhdr_t)) {
1971                     goto unimplemented;
1972                 }
1973                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1974                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1975                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1976                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1977                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1978                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1979                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1980                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1981                     (void *) &errh->offender, sizeof(errh->offender));
1982                 break;
1983             }
1984             default:
1985                 goto unimplemented;
1986             }
1987             break;
1988 
1989         case SOL_IPV6:
1990             switch (cmsg->cmsg_type) {
1991             case IPV6_HOPLIMIT:
1992             {
1993                 uint32_t *v = (uint32_t *)data;
1994                 uint32_t *t_int = (uint32_t *)target_data;
1995 
1996                 if (len != sizeof(uint32_t) ||
1997                     tgt_len != sizeof(uint32_t)) {
1998                     goto unimplemented;
1999                 }
2000                 __put_user(*v, t_int);
2001                 break;
2002             }
2003             case IPV6_RECVERR:
2004             {
2005                 struct errhdr6_t {
2006                    struct sock_extended_err ee;
2007                    struct sockaddr_in6 offender;
2008                 };
2009                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2010                 struct errhdr6_t *target_errh =
2011                     (struct errhdr6_t *)target_data;
2012 
2013                 if (len != sizeof(struct errhdr6_t) ||
2014                     tgt_len != sizeof(struct errhdr6_t)) {
2015                     goto unimplemented;
2016                 }
2017                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2018                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2019                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2020                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2021                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2022                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2023                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2024                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2025                     (void *) &errh->offender, sizeof(errh->offender));
2026                 break;
2027             }
2028             default:
2029                 goto unimplemented;
2030             }
2031             break;
2032 
2033         default:
2034         unimplemented:
2035             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2036                           cmsg->cmsg_level, cmsg->cmsg_type);
2037             memcpy(target_data, data, MIN(len, tgt_len));
2038             if (tgt_len > len) {
2039                 memset(target_data + len, 0, tgt_len - len);
2040             }
2041         }
2042 
2043         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2044         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2045         if (msg_controllen < tgt_space) {
2046             tgt_space = msg_controllen;
2047         }
2048         msg_controllen -= tgt_space;
2049         space += tgt_space;
2050         cmsg = CMSG_NXTHDR(msgh, cmsg);
2051         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2052                                          target_cmsg_start);
2053     }
2054     unlock_user(target_cmsg, target_cmsg_addr, space);
2055  the_end:
2056     target_msgh->msg_controllen = tswapal(space);
2057     return 0;
2058 }
2059 
2060 /* do_setsockopt() must return target values and target errnos. */
2061 static abi_long do_setsockopt(int sockfd, int level, int optname,
2062                               abi_ulong optval_addr, socklen_t optlen)
2063 {
2064     abi_long ret;
2065     int val;
2066     struct ip_mreqn *ip_mreq;
2067     struct ip_mreq_source *ip_mreq_source;
2068 
2069     switch(level) {
2070     case SOL_TCP:
2071     case SOL_UDP:
2072         /* TCP and UDP options all take an 'int' value.  */
2073         if (optlen < sizeof(uint32_t))
2074             return -TARGET_EINVAL;
2075 
2076         if (get_user_u32(val, optval_addr))
2077             return -TARGET_EFAULT;
2078         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2079         break;
2080     case SOL_IP:
2081         switch(optname) {
2082         case IP_TOS:
2083         case IP_TTL:
2084         case IP_HDRINCL:
2085         case IP_ROUTER_ALERT:
2086         case IP_RECVOPTS:
2087         case IP_RETOPTS:
2088         case IP_PKTINFO:
2089         case IP_MTU_DISCOVER:
2090         case IP_RECVERR:
2091         case IP_RECVTTL:
2092         case IP_RECVTOS:
2093 #ifdef IP_FREEBIND
2094         case IP_FREEBIND:
2095 #endif
2096         case IP_MULTICAST_TTL:
2097         case IP_MULTICAST_LOOP:
2098             val = 0;
2099             if (optlen >= sizeof(uint32_t)) {
2100                 if (get_user_u32(val, optval_addr))
2101                     return -TARGET_EFAULT;
2102             } else if (optlen >= 1) {
2103                 if (get_user_u8(val, optval_addr))
2104                     return -TARGET_EFAULT;
2105             }
2106             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2107             break;
2108         case IP_ADD_MEMBERSHIP:
2109         case IP_DROP_MEMBERSHIP:
2110             if (optlen < sizeof (struct target_ip_mreq) ||
2111                 optlen > sizeof (struct target_ip_mreqn))
2112                 return -TARGET_EINVAL;
2113 
2114             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2115             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2116             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2117             break;
2118 
2119         case IP_BLOCK_SOURCE:
2120         case IP_UNBLOCK_SOURCE:
2121         case IP_ADD_SOURCE_MEMBERSHIP:
2122         case IP_DROP_SOURCE_MEMBERSHIP:
2123             if (optlen != sizeof (struct target_ip_mreq_source))
2124                 return -TARGET_EINVAL;
2125 
2126             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
2127             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2128             unlock_user(ip_mreq_source, optval_addr, 0);
2129             break;
2130 
2131         default:
2132             goto unimplemented;
2133         }
2134         break;
2135     case SOL_IPV6:
2136         switch (optname) {
2137         case IPV6_MTU_DISCOVER:
2138         case IPV6_MTU:
2139         case IPV6_V6ONLY:
2140         case IPV6_RECVPKTINFO:
2141         case IPV6_UNICAST_HOPS:
2142         case IPV6_MULTICAST_HOPS:
2143         case IPV6_MULTICAST_LOOP:
2144         case IPV6_RECVERR:
2145         case IPV6_RECVHOPLIMIT:
2146         case IPV6_2292HOPLIMIT:
2147         case IPV6_CHECKSUM:
2148         case IPV6_ADDRFORM:
2149         case IPV6_2292PKTINFO:
2150         case IPV6_RECVTCLASS:
2151         case IPV6_RECVRTHDR:
2152         case IPV6_2292RTHDR:
2153         case IPV6_RECVHOPOPTS:
2154         case IPV6_2292HOPOPTS:
2155         case IPV6_RECVDSTOPTS:
2156         case IPV6_2292DSTOPTS:
2157         case IPV6_TCLASS:
2158         case IPV6_ADDR_PREFERENCES:
2159 #ifdef IPV6_RECVPATHMTU
2160         case IPV6_RECVPATHMTU:
2161 #endif
2162 #ifdef IPV6_TRANSPARENT
2163         case IPV6_TRANSPARENT:
2164 #endif
2165 #ifdef IPV6_FREEBIND
2166         case IPV6_FREEBIND:
2167 #endif
2168 #ifdef IPV6_RECVORIGDSTADDR
2169         case IPV6_RECVORIGDSTADDR:
2170 #endif
2171             val = 0;
2172             if (optlen < sizeof(uint32_t)) {
2173                 return -TARGET_EINVAL;
2174             }
2175             if (get_user_u32(val, optval_addr)) {
2176                 return -TARGET_EFAULT;
2177             }
2178             ret = get_errno(setsockopt(sockfd, level, optname,
2179                                        &val, sizeof(val)));
2180             break;
2181         case IPV6_PKTINFO:
2182         {
2183             struct in6_pktinfo pki;
2184 
2185             if (optlen < sizeof(pki)) {
2186                 return -TARGET_EINVAL;
2187             }
2188 
2189             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2190                 return -TARGET_EFAULT;
2191             }
2192 
2193             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2194 
2195             ret = get_errno(setsockopt(sockfd, level, optname,
2196                                        &pki, sizeof(pki)));
2197             break;
2198         }
2199         case IPV6_ADD_MEMBERSHIP:
2200         case IPV6_DROP_MEMBERSHIP:
2201         {
2202             struct ipv6_mreq ipv6mreq;
2203 
2204             if (optlen < sizeof(ipv6mreq)) {
2205                 return -TARGET_EINVAL;
2206             }
2207 
2208             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2209                 return -TARGET_EFAULT;
2210             }
2211 
2212             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2213 
2214             ret = get_errno(setsockopt(sockfd, level, optname,
2215                                        &ipv6mreq, sizeof(ipv6mreq)));
2216             break;
2217         }
2218         default:
2219             goto unimplemented;
2220         }
2221         break;
2222     case SOL_ICMPV6:
2223         switch (optname) {
2224         case ICMPV6_FILTER:
2225         {
2226             struct icmp6_filter icmp6f;
2227 
2228             if (optlen > sizeof(icmp6f)) {
2229                 optlen = sizeof(icmp6f);
2230             }
2231 
2232             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2233                 return -TARGET_EFAULT;
2234             }
2235 
2236             for (val = 0; val < 8; val++) {
2237                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2238             }
2239 
2240             ret = get_errno(setsockopt(sockfd, level, optname,
2241                                        &icmp6f, optlen));
2242             break;
2243         }
2244         default:
2245             goto unimplemented;
2246         }
2247         break;
2248     case SOL_RAW:
2249         switch (optname) {
2250         case ICMP_FILTER:
2251         case IPV6_CHECKSUM:
2252             /* These take a u32 value. */
2253             if (optlen < sizeof(uint32_t)) {
2254                 return -TARGET_EINVAL;
2255             }
2256 
2257             if (get_user_u32(val, optval_addr)) {
2258                 return -TARGET_EFAULT;
2259             }
2260             ret = get_errno(setsockopt(sockfd, level, optname,
2261                                        &val, sizeof(val)));
2262             break;
2263 
2264         default:
2265             goto unimplemented;
2266         }
2267         break;
2268 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2269     case SOL_ALG:
2270         switch (optname) {
2271         case ALG_SET_KEY:
2272         {
2273             char *alg_key = g_malloc(optlen);
2274 
2275             if (!alg_key) {
2276                 return -TARGET_ENOMEM;
2277             }
2278             if (copy_from_user(alg_key, optval_addr, optlen)) {
2279                 g_free(alg_key);
2280                 return -TARGET_EFAULT;
2281             }
2282             ret = get_errno(setsockopt(sockfd, level, optname,
2283                                        alg_key, optlen));
2284             g_free(alg_key);
2285             break;
2286         }
2287         case ALG_SET_AEAD_AUTHSIZE:
2288         {
2289             ret = get_errno(setsockopt(sockfd, level, optname,
2290                                        NULL, optlen));
2291             break;
2292         }
2293         default:
2294             goto unimplemented;
2295         }
2296         break;
2297 #endif
2298     case TARGET_SOL_SOCKET:
2299         switch (optname) {
2300         case TARGET_SO_RCVTIMEO:
2301         {
2302                 struct timeval tv;
2303 
2304                 optname = SO_RCVTIMEO;
2305 
2306 set_timeout:
2307                 if (optlen != sizeof(struct target_timeval)) {
2308                     return -TARGET_EINVAL;
2309                 }
2310 
2311                 if (copy_from_user_timeval(&tv, optval_addr)) {
2312                     return -TARGET_EFAULT;
2313                 }
2314 
2315                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2316                                 &tv, sizeof(tv)));
2317                 return ret;
2318         }
2319         case TARGET_SO_SNDTIMEO:
2320                 optname = SO_SNDTIMEO;
2321                 goto set_timeout;
2322         case TARGET_SO_ATTACH_FILTER:
2323         {
2324                 struct target_sock_fprog *tfprog;
2325                 struct target_sock_filter *tfilter;
2326                 struct sock_fprog fprog;
2327                 struct sock_filter *filter;
2328                 int i;
2329 
2330                 if (optlen != sizeof(*tfprog)) {
2331                     return -TARGET_EINVAL;
2332                 }
2333                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2334                     return -TARGET_EFAULT;
2335                 }
2336                 if (!lock_user_struct(VERIFY_READ, tfilter,
2337                                       tswapal(tfprog->filter), 0)) {
2338                     unlock_user_struct(tfprog, optval_addr, 1);
2339                     return -TARGET_EFAULT;
2340                 }
2341 
2342                 fprog.len = tswap16(tfprog->len);
2343                 filter = g_try_new(struct sock_filter, fprog.len);
2344                 if (filter == NULL) {
2345                     unlock_user_struct(tfilter, tfprog->filter, 1);
2346                     unlock_user_struct(tfprog, optval_addr, 1);
2347                     return -TARGET_ENOMEM;
2348                 }
2349                 for (i = 0; i < fprog.len; i++) {
2350                     filter[i].code = tswap16(tfilter[i].code);
2351                     filter[i].jt = tfilter[i].jt;
2352                     filter[i].jf = tfilter[i].jf;
2353                     filter[i].k = tswap32(tfilter[i].k);
2354                 }
2355                 fprog.filter = filter;
2356 
2357                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2358                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2359                 g_free(filter);
2360 
2361                 unlock_user_struct(tfilter, tfprog->filter, 1);
2362                 unlock_user_struct(tfprog, optval_addr, 1);
2363                 return ret;
2364         }
2365         case TARGET_SO_BINDTODEVICE:
2366         {
2367                 char *dev_ifname, *addr_ifname;
2368 
2369                 if (optlen > IFNAMSIZ - 1) {
2370                     optlen = IFNAMSIZ - 1;
2371                 }
2372                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2373                 if (!dev_ifname) {
2374                     return -TARGET_EFAULT;
2375                 }
2376                 optname = SO_BINDTODEVICE;
2377                 addr_ifname = alloca(IFNAMSIZ);
2378                 memcpy(addr_ifname, dev_ifname, optlen);
2379                 addr_ifname[optlen] = 0;
2380                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2381                                            addr_ifname, optlen));
2382                 unlock_user(dev_ifname, optval_addr, 0);
2383                 return ret;
2384         }
2385         case TARGET_SO_LINGER:
2386         {
2387                 struct linger lg;
2388                 struct target_linger *tlg;
2389 
2390                 if (optlen != sizeof(struct target_linger)) {
2391                     return -TARGET_EINVAL;
2392                 }
2393                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2394                     return -TARGET_EFAULT;
2395                 }
2396                 __get_user(lg.l_onoff, &tlg->l_onoff);
2397                 __get_user(lg.l_linger, &tlg->l_linger);
2398                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2399                                 &lg, sizeof(lg)));
2400                 unlock_user_struct(tlg, optval_addr, 0);
2401                 return ret;
2402         }
2403             /* Options with 'int' argument.  */
2404         case TARGET_SO_DEBUG:
2405                 optname = SO_DEBUG;
2406                 break;
2407         case TARGET_SO_REUSEADDR:
2408                 optname = SO_REUSEADDR;
2409                 break;
2410 #ifdef SO_REUSEPORT
2411         case TARGET_SO_REUSEPORT:
2412                 optname = SO_REUSEPORT;
2413                 break;
2414 #endif
2415         case TARGET_SO_TYPE:
2416                 optname = SO_TYPE;
2417                 break;
2418         case TARGET_SO_ERROR:
2419                 optname = SO_ERROR;
2420                 break;
2421         case TARGET_SO_DONTROUTE:
2422                 optname = SO_DONTROUTE;
2423                 break;
2424         case TARGET_SO_BROADCAST:
2425                 optname = SO_BROADCAST;
2426                 break;
2427         case TARGET_SO_SNDBUF:
2428                 optname = SO_SNDBUF;
2429                 break;
2430         case TARGET_SO_SNDBUFFORCE:
2431                 optname = SO_SNDBUFFORCE;
2432                 break;
2433         case TARGET_SO_RCVBUF:
2434                 optname = SO_RCVBUF;
2435                 break;
2436         case TARGET_SO_RCVBUFFORCE:
2437                 optname = SO_RCVBUFFORCE;
2438                 break;
2439         case TARGET_SO_KEEPALIVE:
2440                 optname = SO_KEEPALIVE;
2441                 break;
2442         case TARGET_SO_OOBINLINE:
2443                 optname = SO_OOBINLINE;
2444                 break;
2445         case TARGET_SO_NO_CHECK:
2446                 optname = SO_NO_CHECK;
2447                 break;
2448         case TARGET_SO_PRIORITY:
2449                 optname = SO_PRIORITY;
2450                 break;
2451 #ifdef SO_BSDCOMPAT
2452         case TARGET_SO_BSDCOMPAT:
2453                 optname = SO_BSDCOMPAT;
2454                 break;
2455 #endif
2456         case TARGET_SO_PASSCRED:
2457                 optname = SO_PASSCRED;
2458                 break;
2459         case TARGET_SO_PASSSEC:
2460                 optname = SO_PASSSEC;
2461                 break;
2462         case TARGET_SO_TIMESTAMP:
2463                 optname = SO_TIMESTAMP;
2464                 break;
2465         case TARGET_SO_RCVLOWAT:
2466                 optname = SO_RCVLOWAT;
2467                 break;
2468         default:
2469             goto unimplemented;
2470         }
2471         if (optlen < sizeof(uint32_t))
2472             return -TARGET_EINVAL;
2473 
2474         if (get_user_u32(val, optval_addr))
2475             return -TARGET_EFAULT;
2476         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2477         break;
2478 #ifdef SOL_NETLINK
2479     case SOL_NETLINK:
2480         switch (optname) {
2481         case NETLINK_PKTINFO:
2482         case NETLINK_ADD_MEMBERSHIP:
2483         case NETLINK_DROP_MEMBERSHIP:
2484         case NETLINK_BROADCAST_ERROR:
2485         case NETLINK_NO_ENOBUFS:
2486 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2487         case NETLINK_LISTEN_ALL_NSID:
2488         case NETLINK_CAP_ACK:
2489 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2491         case NETLINK_EXT_ACK:
2492 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2493 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2494         case NETLINK_GET_STRICT_CHK:
2495 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2496             break;
2497         default:
2498             goto unimplemented;
2499         }
2500         val = 0;
2501         if (optlen < sizeof(uint32_t)) {
2502             return -TARGET_EINVAL;
2503         }
2504         if (get_user_u32(val, optval_addr)) {
2505             return -TARGET_EFAULT;
2506         }
2507         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2508                                    sizeof(val)));
2509         break;
2510 #endif /* SOL_NETLINK */
2511     default:
2512     unimplemented:
2513         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2514                       level, optname);
2515         ret = -TARGET_ENOPROTOOPT;
2516     }
2517     return ret;
2518 }
2519 
2520 /* do_getsockopt() must return target values and target errnos. */
2521 static abi_long do_getsockopt(int sockfd, int level, int optname,
2522                               abi_ulong optval_addr, abi_ulong optlen)
2523 {
2524     abi_long ret;
2525     int len, val;
2526     socklen_t lv;
2527 
2528     switch(level) {
2529     case TARGET_SOL_SOCKET:
2530         level = SOL_SOCKET;
2531         switch (optname) {
2532         /* These don't just return a single integer */
2533         case TARGET_SO_PEERNAME:
2534             goto unimplemented;
2535         case TARGET_SO_RCVTIMEO: {
2536             struct timeval tv;
2537             socklen_t tvlen;
2538 
2539             optname = SO_RCVTIMEO;
2540 
2541 get_timeout:
2542             if (get_user_u32(len, optlen)) {
2543                 return -TARGET_EFAULT;
2544             }
2545             if (len < 0) {
2546                 return -TARGET_EINVAL;
2547             }
2548 
2549             tvlen = sizeof(tv);
2550             ret = get_errno(getsockopt(sockfd, level, optname,
2551                                        &tv, &tvlen));
2552             if (ret < 0) {
2553                 return ret;
2554             }
2555             if (len > sizeof(struct target_timeval)) {
2556                 len = sizeof(struct target_timeval);
2557             }
2558             if (copy_to_user_timeval(optval_addr, &tv)) {
2559                 return -TARGET_EFAULT;
2560             }
2561             if (put_user_u32(len, optlen)) {
2562                 return -TARGET_EFAULT;
2563             }
2564             break;
2565         }
2566         case TARGET_SO_SNDTIMEO:
2567             optname = SO_SNDTIMEO;
2568             goto get_timeout;
2569         case TARGET_SO_PEERCRED: {
2570             struct ucred cr;
2571             socklen_t crlen;
2572             struct target_ucred *tcr;
2573 
2574             if (get_user_u32(len, optlen)) {
2575                 return -TARGET_EFAULT;
2576             }
2577             if (len < 0) {
2578                 return -TARGET_EINVAL;
2579             }
2580 
2581             crlen = sizeof(cr);
2582             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2583                                        &cr, &crlen));
2584             if (ret < 0) {
2585                 return ret;
2586             }
2587             if (len > crlen) {
2588                 len = crlen;
2589             }
2590             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2591                 return -TARGET_EFAULT;
2592             }
2593             __put_user(cr.pid, &tcr->pid);
2594             __put_user(cr.uid, &tcr->uid);
2595             __put_user(cr.gid, &tcr->gid);
2596             unlock_user_struct(tcr, optval_addr, 1);
2597             if (put_user_u32(len, optlen)) {
2598                 return -TARGET_EFAULT;
2599             }
2600             break;
2601         }
2602         case TARGET_SO_PEERSEC: {
2603             char *name;
2604 
2605             if (get_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             if (len < 0) {
2609                 return -TARGET_EINVAL;
2610             }
2611             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2612             if (!name) {
2613                 return -TARGET_EFAULT;
2614             }
2615             lv = len;
2616             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2617                                        name, &lv));
2618             if (put_user_u32(lv, optlen)) {
2619                 ret = -TARGET_EFAULT;
2620             }
2621             unlock_user(name, optval_addr, lv);
2622             break;
2623         }
2624         case TARGET_SO_LINGER:
2625         {
2626             struct linger lg;
2627             socklen_t lglen;
2628             struct target_linger *tlg;
2629 
2630             if (get_user_u32(len, optlen)) {
2631                 return -TARGET_EFAULT;
2632             }
2633             if (len < 0) {
2634                 return -TARGET_EINVAL;
2635             }
2636 
2637             lglen = sizeof(lg);
2638             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2639                                        &lg, &lglen));
2640             if (ret < 0) {
2641                 return ret;
2642             }
2643             if (len > lglen) {
2644                 len = lglen;
2645             }
2646             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2647                 return -TARGET_EFAULT;
2648             }
2649             __put_user(lg.l_onoff, &tlg->l_onoff);
2650             __put_user(lg.l_linger, &tlg->l_linger);
2651             unlock_user_struct(tlg, optval_addr, 1);
2652             if (put_user_u32(len, optlen)) {
2653                 return -TARGET_EFAULT;
2654             }
2655             break;
2656         }
2657         /* Options with 'int' argument.  */
2658         case TARGET_SO_DEBUG:
2659             optname = SO_DEBUG;
2660             goto int_case;
2661         case TARGET_SO_REUSEADDR:
2662             optname = SO_REUSEADDR;
2663             goto int_case;
2664 #ifdef SO_REUSEPORT
2665         case TARGET_SO_REUSEPORT:
2666             optname = SO_REUSEPORT;
2667             goto int_case;
2668 #endif
2669         case TARGET_SO_TYPE:
2670             optname = SO_TYPE;
2671             goto int_case;
2672         case TARGET_SO_ERROR:
2673             optname = SO_ERROR;
2674             goto int_case;
2675         case TARGET_SO_DONTROUTE:
2676             optname = SO_DONTROUTE;
2677             goto int_case;
2678         case TARGET_SO_BROADCAST:
2679             optname = SO_BROADCAST;
2680             goto int_case;
2681         case TARGET_SO_SNDBUF:
2682             optname = SO_SNDBUF;
2683             goto int_case;
2684         case TARGET_SO_RCVBUF:
2685             optname = SO_RCVBUF;
2686             goto int_case;
2687         case TARGET_SO_KEEPALIVE:
2688             optname = SO_KEEPALIVE;
2689             goto int_case;
2690         case TARGET_SO_OOBINLINE:
2691             optname = SO_OOBINLINE;
2692             goto int_case;
2693         case TARGET_SO_NO_CHECK:
2694             optname = SO_NO_CHECK;
2695             goto int_case;
2696         case TARGET_SO_PRIORITY:
2697             optname = SO_PRIORITY;
2698             goto int_case;
2699 #ifdef SO_BSDCOMPAT
2700         case TARGET_SO_BSDCOMPAT:
2701             optname = SO_BSDCOMPAT;
2702             goto int_case;
2703 #endif
2704         case TARGET_SO_PASSCRED:
2705             optname = SO_PASSCRED;
2706             goto int_case;
2707         case TARGET_SO_TIMESTAMP:
2708             optname = SO_TIMESTAMP;
2709             goto int_case;
2710         case TARGET_SO_RCVLOWAT:
2711             optname = SO_RCVLOWAT;
2712             goto int_case;
2713         case TARGET_SO_ACCEPTCONN:
2714             optname = SO_ACCEPTCONN;
2715             goto int_case;
2716         case TARGET_SO_PROTOCOL:
2717             optname = SO_PROTOCOL;
2718             goto int_case;
2719         case TARGET_SO_DOMAIN:
2720             optname = SO_DOMAIN;
2721             goto int_case;
2722         default:
2723             goto int_case;
2724         }
2725         break;
2726     case SOL_TCP:
2727     case SOL_UDP:
2728         /* TCP and UDP options all take an 'int' value.  */
2729     int_case:
2730         if (get_user_u32(len, optlen))
2731             return -TARGET_EFAULT;
2732         if (len < 0)
2733             return -TARGET_EINVAL;
2734         lv = sizeof(lv);
2735         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2736         if (ret < 0)
2737             return ret;
2738         if (optname == SO_TYPE) {
2739             val = host_to_target_sock_type(val);
2740         }
2741         if (len > lv)
2742             len = lv;
2743         if (len == 4) {
2744             if (put_user_u32(val, optval_addr))
2745                 return -TARGET_EFAULT;
2746         } else {
2747             if (put_user_u8(val, optval_addr))
2748                 return -TARGET_EFAULT;
2749         }
2750         if (put_user_u32(len, optlen))
2751             return -TARGET_EFAULT;
2752         break;
2753     case SOL_IP:
2754         switch(optname) {
2755         case IP_TOS:
2756         case IP_TTL:
2757         case IP_HDRINCL:
2758         case IP_ROUTER_ALERT:
2759         case IP_RECVOPTS:
2760         case IP_RETOPTS:
2761         case IP_PKTINFO:
2762         case IP_MTU_DISCOVER:
2763         case IP_RECVERR:
2764         case IP_RECVTOS:
2765 #ifdef IP_FREEBIND
2766         case IP_FREEBIND:
2767 #endif
2768         case IP_MULTICAST_TTL:
2769         case IP_MULTICAST_LOOP:
2770             if (get_user_u32(len, optlen))
2771                 return -TARGET_EFAULT;
2772             if (len < 0)
2773                 return -TARGET_EINVAL;
2774             lv = sizeof(lv);
2775             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2776             if (ret < 0)
2777                 return ret;
2778             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2779                 len = 1;
2780                 if (put_user_u32(len, optlen)
2781                     || put_user_u8(val, optval_addr))
2782                     return -TARGET_EFAULT;
2783             } else {
2784                 if (len > sizeof(int))
2785                     len = sizeof(int);
2786                 if (put_user_u32(len, optlen)
2787                     || put_user_u32(val, optval_addr))
2788                     return -TARGET_EFAULT;
2789             }
2790             break;
2791         default:
2792             ret = -TARGET_ENOPROTOOPT;
2793             break;
2794         }
2795         break;
2796     case SOL_IPV6:
2797         switch (optname) {
2798         case IPV6_MTU_DISCOVER:
2799         case IPV6_MTU:
2800         case IPV6_V6ONLY:
2801         case IPV6_RECVPKTINFO:
2802         case IPV6_UNICAST_HOPS:
2803         case IPV6_MULTICAST_HOPS:
2804         case IPV6_MULTICAST_LOOP:
2805         case IPV6_RECVERR:
2806         case IPV6_RECVHOPLIMIT:
2807         case IPV6_2292HOPLIMIT:
2808         case IPV6_CHECKSUM:
2809         case IPV6_ADDRFORM:
2810         case IPV6_2292PKTINFO:
2811         case IPV6_RECVTCLASS:
2812         case IPV6_RECVRTHDR:
2813         case IPV6_2292RTHDR:
2814         case IPV6_RECVHOPOPTS:
2815         case IPV6_2292HOPOPTS:
2816         case IPV6_RECVDSTOPTS:
2817         case IPV6_2292DSTOPTS:
2818         case IPV6_TCLASS:
2819         case IPV6_ADDR_PREFERENCES:
2820 #ifdef IPV6_RECVPATHMTU
2821         case IPV6_RECVPATHMTU:
2822 #endif
2823 #ifdef IPV6_TRANSPARENT
2824         case IPV6_TRANSPARENT:
2825 #endif
2826 #ifdef IPV6_FREEBIND
2827         case IPV6_FREEBIND:
2828 #endif
2829 #ifdef IPV6_RECVORIGDSTADDR
2830         case IPV6_RECVORIGDSTADDR:
2831 #endif
2832             if (get_user_u32(len, optlen))
2833                 return -TARGET_EFAULT;
2834             if (len < 0)
2835                 return -TARGET_EINVAL;
2836             lv = sizeof(lv);
2837             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2838             if (ret < 0)
2839                 return ret;
2840             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2841                 len = 1;
2842                 if (put_user_u32(len, optlen)
2843                     || put_user_u8(val, optval_addr))
2844                     return -TARGET_EFAULT;
2845             } else {
2846                 if (len > sizeof(int))
2847                     len = sizeof(int);
2848                 if (put_user_u32(len, optlen)
2849                     || put_user_u32(val, optval_addr))
2850                     return -TARGET_EFAULT;
2851             }
2852             break;
2853         default:
2854             ret = -TARGET_ENOPROTOOPT;
2855             break;
2856         }
2857         break;
2858 #ifdef SOL_NETLINK
2859     case SOL_NETLINK:
2860         switch (optname) {
2861         case NETLINK_PKTINFO:
2862         case NETLINK_BROADCAST_ERROR:
2863         case NETLINK_NO_ENOBUFS:
2864 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2865         case NETLINK_LISTEN_ALL_NSID:
2866         case NETLINK_CAP_ACK:
2867 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2868 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2869         case NETLINK_EXT_ACK:
2870 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2871 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2872         case NETLINK_GET_STRICT_CHK:
2873 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2874             if (get_user_u32(len, optlen)) {
2875                 return -TARGET_EFAULT;
2876             }
2877             if (len != sizeof(val)) {
2878                 return -TARGET_EINVAL;
2879             }
2880             lv = len;
2881             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2882             if (ret < 0) {
2883                 return ret;
2884             }
2885             if (put_user_u32(lv, optlen)
2886                 || put_user_u32(val, optval_addr)) {
2887                 return -TARGET_EFAULT;
2888             }
2889             break;
2890 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2891         case NETLINK_LIST_MEMBERSHIPS:
2892         {
2893             uint32_t *results;
2894             int i;
2895             if (get_user_u32(len, optlen)) {
2896                 return -TARGET_EFAULT;
2897             }
2898             if (len < 0) {
2899                 return -TARGET_EINVAL;
2900             }
2901             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2902             if (!results && len > 0) {
2903                 return -TARGET_EFAULT;
2904             }
2905             lv = len;
2906             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2907             if (ret < 0) {
2908                 unlock_user(results, optval_addr, 0);
2909                 return ret;
2910             }
2911             /* Swap host endianness to target endianness. */
2912             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2913                 results[i] = tswap32(results[i]);
2914             }
2915             if (put_user_u32(lv, optlen)) {
                unlock_user(results, optval_addr, 0);
2916                 return -TARGET_EFAULT;
2917             }
2918             unlock_user(results, optval_addr, 0);
2919             break;
2920         }
2921 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2922         default:
2923             goto unimplemented;
2924         }
2925         break;
2926 #endif /* SOL_NETLINK */
2927     default:
2928     unimplemented:
2929         qemu_log_mask(LOG_UNIMP,
2930                       "getsockopt level=%d optname=%d not yet supported\n",
2931                       level, optname);
2932         ret = -TARGET_EOPNOTSUPP;
2933         break;
2934     }
2935     return ret;
2936 }
2937 
2938 /* Convert target low/high pair representing file offset into the host
2939  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2940  * as the kernel doesn't handle them either.
2941  */
2942 static void target_to_host_low_high(abi_ulong tlow,
2943                                     abi_ulong thigh,
2944                                     unsigned long *hlow,
2945                                     unsigned long *hhigh)
2946 {
2947     uint64_t off = tlow |
2948         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2949         TARGET_LONG_BITS / 2;
2950 
2951     *hlow = off;
2952     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2953 }
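/*
 * Illustrative example for target_to_host_low_high() (assuming a 32-bit
 * target on a 64-bit host): tlow = 0x89abcdef and thigh = 0x01234567
 * combine into off = 0x0123456789abcdef, so *hlow = 0x0123456789abcdef
 * and *hhigh = 0.
 */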
2954 
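/*
 * Lock a guest iovec array into host memory and return a host iovec array,
 * or NULL with errno set on failure. Bad buffers after the first entry are
 * turned into zero-length entries so that the host call performs a partial
 * transfer, as explained in the loop below.
 */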
2955 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2956                                 abi_ulong count, int copy)
2957 {
2958     struct target_iovec *target_vec;
2959     struct iovec *vec;
2960     abi_ulong total_len, max_len;
2961     int i;
2962     int err = 0;
2963     bool bad_address = false;
2964 
2965     if (count == 0) {
2966         errno = 0;
2967         return NULL;
2968     }
2969     if (count > IOV_MAX) {
2970         errno = EINVAL;
2971         return NULL;
2972     }
2973 
2974     vec = g_try_new0(struct iovec, count);
2975     if (vec == NULL) {
2976         errno = ENOMEM;
2977         return NULL;
2978     }
2979 
2980     target_vec = lock_user(VERIFY_READ, target_addr,
2981                            count * sizeof(struct target_iovec), 1);
2982     if (target_vec == NULL) {
2983         err = EFAULT;
2984         goto fail2;
2985     }
2986 
2987     /* ??? If host page size > target page size, this will result in a
2988        value larger than what we can actually support.  */
2989     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2990     total_len = 0;
2991 
2992     for (i = 0; i < count; i++) {
2993         abi_ulong base = tswapal(target_vec[i].iov_base);
2994         abi_long len = tswapal(target_vec[i].iov_len);
2995 
2996         if (len < 0) {
2997             err = EINVAL;
2998             goto fail;
2999         } else if (len == 0) {
3000             /* Zero length pointer is ignored.  */
3001             vec[i].iov_base = 0;
3002         } else {
3003             vec[i].iov_base = lock_user(type, base, len, copy);
3004             /* If the first buffer pointer is bad, this is a fault.  But
3005              * subsequent bad buffers will result in a partial write; this
3006              * is realized by filling the vector with null pointers and
3007              * zero lengths. */
3008             if (!vec[i].iov_base) {
3009                 if (i == 0) {
3010                     err = EFAULT;
3011                     goto fail;
3012                 } else {
3013                     bad_address = true;
3014                 }
3015             }
3016             if (bad_address) {
3017                 len = 0;
3018             }
3019             if (len > max_len - total_len) {
3020                 len = max_len - total_len;
3021             }
3022         }
3023         vec[i].iov_len = len;
3024         total_len += len;
3025     }
3026 
3027     unlock_user(target_vec, target_addr, 0);
3028     return vec;
3029 
3030  fail:
3031     while (--i >= 0) {
3032         if (tswapal(target_vec[i].iov_len) > 0) {
3033             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3034         }
3035     }
3036     unlock_user(target_vec, target_addr, 0);
3037  fail2:
3038     g_free(vec);
3039     errno = err;
3040     return NULL;
3041 }
3042 
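/*
 * Undo lock_iovec(): unlock each guest buffer (copying data back when
 * 'copy' is set) and free the host iovec array.
 */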
3043 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3044                          abi_ulong count, int copy)
3045 {
3046     struct target_iovec *target_vec;
3047     int i;
3048 
3049     target_vec = lock_user(VERIFY_READ, target_addr,
3050                            count * sizeof(struct target_iovec), 1);
3051     if (target_vec) {
3052         for (i = 0; i < count; i++) {
3053             abi_ulong base = tswapal(target_vec[i].iov_base);
3054             abi_long len = tswapal(target_vec[i].iov_len);
3055             if (len < 0) {
3056                 break;
3057             }
3058             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3059         }
3060         unlock_user(target_vec, target_addr, 0);
3061     }
3062 
3063     g_free(vec);
3064 }
3065 
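/*
 * Translate a guest socket type, including the SOCK_CLOEXEC/SOCK_NONBLOCK
 * flags, into the host encoding; returns 0 on success or a target errno.
 */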
3066 static inline int target_to_host_sock_type(int *type)
3067 {
3068     int host_type = 0;
3069     int target_type = *type;
3070 
3071     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3072     case TARGET_SOCK_DGRAM:
3073         host_type = SOCK_DGRAM;
3074         break;
3075     case TARGET_SOCK_STREAM:
3076         host_type = SOCK_STREAM;
3077         break;
3078     default:
3079         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3080         break;
3081     }
3082     if (target_type & TARGET_SOCK_CLOEXEC) {
3083 #if defined(SOCK_CLOEXEC)
3084         host_type |= SOCK_CLOEXEC;
3085 #else
3086         return -TARGET_EINVAL;
3087 #endif
3088     }
3089     if (target_type & TARGET_SOCK_NONBLOCK) {
3090 #if defined(SOCK_NONBLOCK)
3091         host_type |= SOCK_NONBLOCK;
3092 #elif !defined(O_NONBLOCK)
3093         return -TARGET_EINVAL;
3094 #endif
3095     }
3096     *type = host_type;
3097     return 0;
3098 }
3099 
3100 /* Try to emulate socket type flags after socket creation.  */
3101 static int sock_flags_fixup(int fd, int target_type)
3102 {
3103 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3104     if (target_type & TARGET_SOCK_NONBLOCK) {
3105         int flags = fcntl(fd, F_GETFL);
3106         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3107             close(fd);
3108             return -TARGET_EINVAL;
3109         }
3110     }
3111 #endif
3112     return fd;
3113 }
3114 
3115 /* do_socket() Must return target values and target errnos. */
3116 static abi_long do_socket(int domain, int type, int protocol)
3117 {
3118     int target_type = type;
3119     int ret;
3120 
3121     ret = target_to_host_sock_type(&type);
3122     if (ret) {
3123         return ret;
3124     }
3125 
3126     if (domain == PF_NETLINK && !(
3127 #ifdef CONFIG_RTNETLINK
3128          protocol == NETLINK_ROUTE ||
3129 #endif
3130          protocol == NETLINK_KOBJECT_UEVENT ||
3131          protocol == NETLINK_AUDIT)) {
3132         return -TARGET_EPROTONOSUPPORT;
3133     }
3134 
3135     if (domain == AF_PACKET ||
3136         (domain == AF_INET && type == SOCK_PACKET)) {
3137         protocol = tswap16(protocol);
3138     }
3139 
3140     ret = get_errno(socket(domain, type, protocol));
3141     if (ret >= 0) {
3142         ret = sock_flags_fixup(ret, target_type);
3143         if (type == SOCK_PACKET) {
3144             /* Handle an obsolete case:
3145              * if the socket type is SOCK_PACKET, bind by name.
3146              */
3147             fd_trans_register(ret, &target_packet_trans);
3148         } else if (domain == PF_NETLINK) {
3149             switch (protocol) {
3150 #ifdef CONFIG_RTNETLINK
3151             case NETLINK_ROUTE:
3152                 fd_trans_register(ret, &target_netlink_route_trans);
3153                 break;
3154 #endif
3155             case NETLINK_KOBJECT_UEVENT:
3156                 /* nothing to do: messages are strings */
3157                 break;
3158             case NETLINK_AUDIT:
3159                 fd_trans_register(ret, &target_netlink_audit_trans);
3160                 break;
3161             default:
3162                 g_assert_not_reached();
3163             }
3164         }
3165     }
3166     return ret;
3167 }
3168 
3169 /* do_bind() Must return target values and target errnos. */
3170 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3171                         socklen_t addrlen)
3172 {
3173     void *addr;
3174     abi_long ret;
3175 
3176     if ((int)addrlen < 0) {
3177         return -TARGET_EINVAL;
3178     }
3179 
3180     addr = alloca(addrlen+1);
3181 
3182     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3183     if (ret)
3184         return ret;
3185 
3186     return get_errno(bind(sockfd, addr, addrlen));
3187 }
3188 
3189 /* do_connect() Must return target values and target errnos. */
3190 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3191                            socklen_t addrlen)
3192 {
3193     void *addr;
3194     abi_long ret;
3195 
3196     if ((int)addrlen < 0) {
3197         return -TARGET_EINVAL;
3198     }
3199 
3200     addr = alloca(addrlen+1);
3201 
3202     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3203     if (ret)
3204         return ret;
3205 
3206     return get_errno(safe_connect(sockfd, addr, addrlen));
3207 }
3208 
3209 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3210 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3211                                       int flags, int send)
3212 {
3213     abi_long ret, len;
3214     struct msghdr msg;
3215     abi_ulong count;
3216     struct iovec *vec;
3217     abi_ulong target_vec;
3218 
3219     if (msgp->msg_name) {
3220         msg.msg_namelen = tswap32(msgp->msg_namelen);
3221         msg.msg_name = alloca(msg.msg_namelen+1);
3222         ret = target_to_host_sockaddr(fd, msg.msg_name,
3223                                       tswapal(msgp->msg_name),
3224                                       msg.msg_namelen);
3225         if (ret == -TARGET_EFAULT) {
3226             /* For connected sockets msg_name and msg_namelen must
3227              * be ignored, so returning EFAULT immediately is wrong.
3228              * Instead, pass a bad msg_name to the host kernel, and
3229              * let it decide whether to return EFAULT or not.
3230              */
3231             msg.msg_name = (void *)-1;
3232         } else if (ret) {
3233             goto out2;
3234         }
3235     } else {
3236         msg.msg_name = NULL;
3237         msg.msg_namelen = 0;
3238     }
3239     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3240     msg.msg_control = alloca(msg.msg_controllen);
3241     memset(msg.msg_control, 0, msg.msg_controllen);
3242 
3243     msg.msg_flags = tswap32(msgp->msg_flags);
3244 
3245     count = tswapal(msgp->msg_iovlen);
3246     target_vec = tswapal(msgp->msg_iov);
3247 
3248     if (count > IOV_MAX) {
3249         /* sendmsg/recvmsg return a different errno for this condition than
3250          * readv/writev, so we must catch it here before lock_iovec() does.
3251          */
3252         ret = -TARGET_EMSGSIZE;
3253         goto out2;
3254     }
3255 
3256     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3257                      target_vec, count, send);
3258     if (vec == NULL) {
3259         ret = -host_to_target_errno(errno);
3260         goto out2;
3261     }
3262     msg.msg_iovlen = count;
3263     msg.msg_iov = vec;
3264 
3265     if (send) {
3266         if (fd_trans_target_to_host_data(fd)) {
3267             void *host_msg;
3268 
3269             host_msg = g_malloc(msg.msg_iov->iov_len);
3270             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3271             ret = fd_trans_target_to_host_data(fd)(host_msg,
3272                                                    msg.msg_iov->iov_len);
3273             if (ret >= 0) {
3274                 msg.msg_iov->iov_base = host_msg;
3275                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3276             }
3277             g_free(host_msg);
3278         } else {
3279             ret = target_to_host_cmsg(&msg, msgp);
3280             if (ret == 0) {
3281                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3282             }
3283         }
3284     } else {
3285         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3286         if (!is_error(ret)) {
3287             len = ret;
3288             if (fd_trans_host_to_target_data(fd)) {
3289                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3290                                                MIN(msg.msg_iov->iov_len, len));
3291             } else {
3292                 ret = host_to_target_cmsg(msgp, &msg);
3293             }
3294             if (!is_error(ret)) {
3295                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3296                 msgp->msg_flags = tswap32(msg.msg_flags);
3297                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3298                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3299                                     msg.msg_name, msg.msg_namelen);
3300                     if (ret) {
3301                         goto out;
3302                     }
3303                 }
3304 
3305                 ret = len;
3306             }
3307         }
3308     }
3309 
3310 out:
3311     unlock_iovec(vec, target_vec, count, !send);
3312 out2:
3313     return ret;
3314 }
3315 
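     /*
      * Wrapper that locks the guest msghdr and dispatches to
      * do_sendrecvmsg_locked(); 'send' selects sendmsg vs recvmsg.
      */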
3316 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3317                                int flags, int send)
3318 {
3319     abi_long ret;
3320     struct target_msghdr *msgp;
3321 
3322     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3323                           msgp,
3324                           target_msg,
3325                           send ? 1 : 0)) {
3326         return -TARGET_EFAULT;
3327     }
3328     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3329     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3330     return ret;
3331 }
3332 
3333 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3334  * so it might not have this *mmsg-specific flag either.
3335  */
3336 #ifndef MSG_WAITFORONE
3337 #define MSG_WAITFORONE 0x10000
3338 #endif
3339 
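     /*
      * Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
      * Like the kernel, return the number of messages processed if any
      * succeeded, otherwise the error from the first (failed) message.
      */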
3340 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3341                                 unsigned int vlen, unsigned int flags,
3342                                 int send)
3343 {
3344     struct target_mmsghdr *mmsgp;
3345     abi_long ret = 0;
3346     int i;
3347 
3348     if (vlen > UIO_MAXIOV) {
3349         vlen = UIO_MAXIOV;
3350     }
3351 
3352     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3353     if (!mmsgp) {
3354         return -TARGET_EFAULT;
3355     }
3356 
3357     for (i = 0; i < vlen; i++) {
3358         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3359         if (is_error(ret)) {
3360             break;
3361         }
3362         mmsgp[i].msg_len = tswap32(ret);
3363         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3364         if (flags & MSG_WAITFORONE) {
3365             flags |= MSG_DONTWAIT;
3366         }
3367     }
3368 
3369     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3370 
3371     /* Return the number of datagrams sent or received if we handled
3372      * any at all; otherwise return the error.
3373      */
3374     if (i) {
3375         return i;
3376     }
3377     return ret;
3378 }
3379 
3380 /* do_accept4() Must return target values and target errnos. */
3381 static abi_long do_accept4(int fd, abi_ulong target_addr,
3382                            abi_ulong target_addrlen_addr, int flags)
3383 {
3384     socklen_t addrlen, ret_addrlen;
3385     void *addr;
3386     abi_long ret;
3387     int host_flags;
3388 
3389     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3390 
3391     if (target_addr == 0) {
3392         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3393     }
3394 
3395     /* Linux returns EFAULT if the addrlen pointer is invalid */
3396     if (get_user_u32(addrlen, target_addrlen_addr))
3397         return -TARGET_EFAULT;
3398 
3399     if ((int)addrlen < 0) {
3400         return -TARGET_EINVAL;
3401     }
3402 
3403     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3404         return -TARGET_EFAULT;
3405     }
3406 
3407     addr = alloca(addrlen);
3408 
3409     ret_addrlen = addrlen;
3410     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3411     if (!is_error(ret)) {
3412         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3413         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3414             ret = -TARGET_EFAULT;
3415         }
3416     }
3417     return ret;
3418 }
3419 
3420 /* do_getpeername() Must return target values and target errnos. */
3421 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3422                                abi_ulong target_addrlen_addr)
3423 {
3424     socklen_t addrlen, ret_addrlen;
3425     void *addr;
3426     abi_long ret;
3427 
3428     if (get_user_u32(addrlen, target_addrlen_addr))
3429         return -TARGET_EFAULT;
3430 
3431     if ((int)addrlen < 0) {
3432         return -TARGET_EINVAL;
3433     }
3434 
3435     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3436         return -TARGET_EFAULT;
3437     }
3438 
3439     addr = alloca(addrlen);
3440 
3441     ret_addrlen = addrlen;
3442     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3443     if (!is_error(ret)) {
3444         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3445         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3446             ret = -TARGET_EFAULT;
3447         }
3448     }
3449     return ret;
3450 }
3451 
3452 /* do_getsockname() Must return target values and target errnos. */
3453 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3454                                abi_ulong target_addrlen_addr)
3455 {
3456     socklen_t addrlen, ret_addrlen;
3457     void *addr;
3458     abi_long ret;
3459 
3460     if (get_user_u32(addrlen, target_addrlen_addr))
3461         return -TARGET_EFAULT;
3462 
3463     if ((int)addrlen < 0) {
3464         return -TARGET_EINVAL;
3465     }
3466 
3467     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3468         return -TARGET_EFAULT;
3469     }
3470 
3471     addr = alloca(addrlen);
3472 
3473     ret_addrlen = addrlen;
3474     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3475     if (!is_error(ret)) {
3476         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3477         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3478             ret = -TARGET_EFAULT;
3479         }
3480     }
3481     return ret;
3482 }
3483 
3484 /* do_socketpair() Must return target values and target errnos. */
3485 static abi_long do_socketpair(int domain, int type, int protocol,
3486                               abi_ulong target_tab_addr)
3487 {
3488     int tab[2];
3489     abi_long ret;
3490 
3491     target_to_host_sock_type(&type);
3492 
3493     ret = get_errno(socketpair(domain, type, protocol, tab));
3494     if (!is_error(ret)) {
3495         if (put_user_s32(tab[0], target_tab_addr)
3496             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3497             ret = -TARGET_EFAULT;
3498     }
3499     return ret;
3500 }
3501 
3502 /* do_sendto() Must return target values and target errnos. */
3503 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3504                           abi_ulong target_addr, socklen_t addrlen)
3505 {
3506     void *addr;
3507     void *host_msg;
3508     void *copy_msg = NULL;
3509     abi_long ret;
3510 
3511     if ((int)addrlen < 0) {
3512         return -TARGET_EINVAL;
3513     }
3514 
3515     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3516     if (!host_msg)
3517         return -TARGET_EFAULT;
3518     if (fd_trans_target_to_host_data(fd)) {
3519         copy_msg = host_msg;
3520         host_msg = g_malloc(len);
3521         memcpy(host_msg, copy_msg, len);
3522         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3523         if (ret < 0) {
3524             goto fail;
3525         }
3526     }
3527     if (target_addr) {
3528         addr = alloca(addrlen+1);
3529         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3530         if (ret) {
3531             goto fail;
3532         }
3533         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3534     } else {
3535         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3536     }
3537 fail:
3538     if (copy_msg) {
3539         g_free(host_msg);
3540         host_msg = copy_msg;
3541     }
3542     unlock_user(host_msg, msg, 0);
3543     return ret;
3544 }
3545 
3546 /* do_recvfrom() Must return target values and target errnos. */
3547 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3548                             abi_ulong target_addr,
3549                             abi_ulong target_addrlen)
3550 {
3551     socklen_t addrlen, ret_addrlen;
3552     void *addr;
3553     void *host_msg;
3554     abi_long ret;
3555 
3556     if (!msg) {
3557         host_msg = NULL;
3558     } else {
3559         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3560         if (!host_msg) {
3561             return -TARGET_EFAULT;
3562         }
3563     }
3564     if (target_addr) {
3565         if (get_user_u32(addrlen, target_addrlen)) {
3566             ret = -TARGET_EFAULT;
3567             goto fail;
3568         }
3569         if ((int)addrlen < 0) {
3570             ret = -TARGET_EINVAL;
3571             goto fail;
3572         }
3573         addr = alloca(addrlen);
3574         ret_addrlen = addrlen;
3575         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3576                                       addr, &ret_addrlen));
3577     } else {
3578         addr = NULL; /* To keep compiler quiet.  */
3579         addrlen = 0; /* To keep compiler quiet.  */
3580         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3581     }
3582     if (!is_error(ret)) {
3583         if (fd_trans_host_to_target_data(fd)) {
3584             abi_long trans;
3585             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3586             if (is_error(trans)) {
3587                 ret = trans;
3588                 goto fail;
3589             }
3590         }
3591         if (target_addr) {
3592             host_to_target_sockaddr(target_addr, addr,
3593                                     MIN(addrlen, ret_addrlen));
3594             if (put_user_u32(ret_addrlen, target_addrlen)) {
3595                 ret = -TARGET_EFAULT;
3596                 goto fail;
3597             }
3598         }
3599         unlock_user(host_msg, msg, len);
3600     } else {
3601 fail:
3602         unlock_user(host_msg, msg, 0);
3603     }
3604     return ret;
3605 }
3606 
3607 #ifdef TARGET_NR_socketcall
3608 /* do_socketcall() must return target values and target errnos. */
3609 static abi_long do_socketcall(int num, abi_ulong vptr)
3610 {
3611     static const unsigned nargs[] = { /* number of arguments per operation */
3612         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3613         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3614         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3615         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3616         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3617         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3618         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3619         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3620         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3621         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3622         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3623         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3624         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3625         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3626         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3627         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3628         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3629         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3630         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3631         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3632     };
3633     abi_long a[6]; /* max 6 args */
3634     unsigned i;
3635 
3636     /* check the range of the first argument num */
3637     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3638     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3639         return -TARGET_EINVAL;
3640     }
3641     /* ensure we have space for args */
3642     if (nargs[num] > ARRAY_SIZE(a)) {
3643         return -TARGET_EINVAL;
3644     }
3645     /* collect the arguments in a[] according to nargs[] */
3646     for (i = 0; i < nargs[num]; ++i) {
3647         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3648             return -TARGET_EFAULT;
3649         }
3650     }
3651     /* now when we have the args, invoke the appropriate underlying function */
3652     switch (num) {
3653     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3654         return do_socket(a[0], a[1], a[2]);
3655     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3656         return do_bind(a[0], a[1], a[2]);
3657     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3658         return do_connect(a[0], a[1], a[2]);
3659     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3660         return get_errno(listen(a[0], a[1]));
3661     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3662         return do_accept4(a[0], a[1], a[2], 0);
3663     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3664         return do_getsockname(a[0], a[1], a[2]);
3665     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3666         return do_getpeername(a[0], a[1], a[2]);
3667     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3668         return do_socketpair(a[0], a[1], a[2], a[3]);
3669     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3670         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3671     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3672         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3673     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3674         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3675     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3676         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3677     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3678         return get_errno(shutdown(a[0], a[1]));
3679     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3680         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3681     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3682         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3683     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3684         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3685     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3686         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3687     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3688         return do_accept4(a[0], a[1], a[2], a[3]);
3689     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3690         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3691     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3692         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3693     default:
3694         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3695         return -TARGET_EINVAL;
3696     }
3697 }
3698 #endif
3699 
3700 #define N_SHM_REGIONS	32
3701 
3702 static struct shm_region {
3703     abi_ulong start;
3704     abi_ulong size;
3705     bool in_use;
3706 } shm_regions[N_SHM_REGIONS];
3707 
3708 #ifndef TARGET_SEMID64_DS
3709 /* asm-generic version of this struct */
3710 struct target_semid64_ds
3711 {
3712   struct target_ipc_perm sem_perm;
3713   abi_ulong sem_otime;
3714 #if TARGET_ABI_BITS == 32
3715   abi_ulong __unused1;
3716 #endif
3717   abi_ulong sem_ctime;
3718 #if TARGET_ABI_BITS == 32
3719   abi_ulong __unused2;
3720 #endif
3721   abi_ulong sem_nsems;
3722   abi_ulong __unused3;
3723   abi_ulong __unused4;
3724 };
3725 #endif
3726 
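     /*
      * Convert the ipc_perm embedded in a guest semid64_ds to the host
      * layout.  The mode field is 32 bits wide on Alpha, MIPS and PPC and
      * 16 bits elsewhere; __seq is 32 bits only on PPC.
      */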
3727 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3728                                                abi_ulong target_addr)
3729 {
3730     struct target_ipc_perm *target_ip;
3731     struct target_semid64_ds *target_sd;
3732 
3733     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3734         return -TARGET_EFAULT;
3735     target_ip = &(target_sd->sem_perm);
3736     host_ip->__key = tswap32(target_ip->__key);
3737     host_ip->uid = tswap32(target_ip->uid);
3738     host_ip->gid = tswap32(target_ip->gid);
3739     host_ip->cuid = tswap32(target_ip->cuid);
3740     host_ip->cgid = tswap32(target_ip->cgid);
3741 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3742     host_ip->mode = tswap32(target_ip->mode);
3743 #else
3744     host_ip->mode = tswap16(target_ip->mode);
3745 #endif
3746 #if defined(TARGET_PPC)
3747     host_ip->__seq = tswap32(target_ip->__seq);
3748 #else
3749     host_ip->__seq = tswap16(target_ip->__seq);
3750 #endif
3751     unlock_user_struct(target_sd, target_addr, 0);
3752     return 0;
3753 }
3754 
3755 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3756                                                struct ipc_perm *host_ip)
3757 {
3758     struct target_ipc_perm *target_ip;
3759     struct target_semid64_ds *target_sd;
3760 
3761     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3762         return -TARGET_EFAULT;
3763     target_ip = &(target_sd->sem_perm);
3764     target_ip->__key = tswap32(host_ip->__key);
3765     target_ip->uid = tswap32(host_ip->uid);
3766     target_ip->gid = tswap32(host_ip->gid);
3767     target_ip->cuid = tswap32(host_ip->cuid);
3768     target_ip->cgid = tswap32(host_ip->cgid);
3769 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3770     target_ip->mode = tswap32(host_ip->mode);
3771 #else
3772     target_ip->mode = tswap16(host_ip->mode);
3773 #endif
3774 #if defined(TARGET_PPC)
3775     target_ip->__seq = tswap32(host_ip->__seq);
3776 #else
3777     target_ip->__seq = tswap16(host_ip->__seq);
3778 #endif
3779     unlock_user_struct(target_sd, target_addr, 1);
3780     return 0;
3781 }
3782 
3783 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3784                                                abi_ulong target_addr)
3785 {
3786     struct target_semid64_ds *target_sd;
3787 
3788     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3789         return -TARGET_EFAULT;
3790     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3791         return -TARGET_EFAULT;
3792     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3793     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3794     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3795     unlock_user_struct(target_sd, target_addr, 0);
3796     return 0;
3797 }
3798 
3799 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3800                                                struct semid_ds *host_sd)
3801 {
3802     struct target_semid64_ds *target_sd;
3803 
3804     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3805         return -TARGET_EFAULT;
3806     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3807         return -TARGET_EFAULT;
3808     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3809     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3810     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3811     unlock_user_struct(target_sd, target_addr, 1);
3812     return 0;
3813 }
3814 
3815 struct target_seminfo {
3816     int semmap;
3817     int semmni;
3818     int semmns;
3819     int semmnu;
3820     int semmsl;
3821     int semopm;
3822     int semume;
3823     int semusz;
3824     int semvmx;
3825     int semaem;
3826 };
3827 
3828 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3829                                               struct seminfo *host_seminfo)
3830 {
3831     struct target_seminfo *target_seminfo;
3832     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3833         return -TARGET_EFAULT;
3834     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3835     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3836     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3837     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3838     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3839     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3840     __put_user(host_seminfo->semume, &target_seminfo->semume);
3841     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3842     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3843     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3844     unlock_user_struct(target_seminfo, target_addr, 1);
3845     return 0;
3846 }
3847 
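     /*
      * 'union semun' mirrors the host ABI for semctl()'s fourth argument;
      * 'union target_semun' is the guest view, in which the pointer members
      * are guest addresses (abi_ulong) that still need translation.
      */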
3848 union semun {
3849 	int val;
3850 	struct semid_ds *buf;
3851 	unsigned short *array;
3852 	struct seminfo *__buf;
3853 };
3854 
3855 union target_semun {
3856 	int val;
3857 	abi_ulong buf;
3858 	abi_ulong array;
3859 	abi_ulong __buf;
3860 };
3861 
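     /*
      * Fetch the guest's array of semaphore values (the SETALL case).
      * IPC_STAT is issued first to learn how many semaphores the set
      * contains; the allocated *host_array is freed later by
      * host_to_target_semarray().
      */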
3862 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3863                                                abi_ulong target_addr)
3864 {
3865     int nsems;
3866     unsigned short *array;
3867     union semun semun;
3868     struct semid_ds semid_ds;
3869     int i, ret;
3870 
3871     semun.buf = &semid_ds;
3872 
3873     ret = semctl(semid, 0, IPC_STAT, semun);
3874     if (ret == -1)
3875         return get_errno(ret);
3876 
3877     nsems = semid_ds.sem_nsems;
3878 
3879     *host_array = g_try_new(unsigned short, nsems);
3880     if (!*host_array) {
3881         return -TARGET_ENOMEM;
3882     }
3883     array = lock_user(VERIFY_READ, target_addr,
3884                       nsems*sizeof(unsigned short), 1);
3885     if (!array) {
3886         g_free(*host_array);
3887         return -TARGET_EFAULT;
3888     }
3889 
3890     for(i=0; i<nsems; i++) {
3891         __get_user((*host_array)[i], &array[i]);
3892     }
3893     unlock_user(array, target_addr, 0);
3894 
3895     return 0;
3896 }
3897 
3898 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3899                                                unsigned short **host_array)
3900 {
3901     int nsems;
3902     unsigned short *array;
3903     union semun semun;
3904     struct semid_ds semid_ds;
3905     int i, ret;
3906 
3907     semun.buf = &semid_ds;
3908 
3909     ret = semctl(semid, 0, IPC_STAT, semun);
3910     if (ret == -1)
3911         return get_errno(ret);
3912 
3913     nsems = semid_ds.sem_nsems;
3914 
3915     array = lock_user(VERIFY_WRITE, target_addr,
3916                       nsems*sizeof(unsigned short), 0);
3917     if (!array)
3918         return -TARGET_EFAULT;
3919 
3920     for(i=0; i<nsems; i++) {
3921         __put_user((*host_array)[i], &array[i]);
3922     }
3923     g_free(*host_array);
3924     unlock_user(array, target_addr, 1);
3925 
3926     return 0;
3927 }
3928 
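     /*
      * Emulate semctl().  GETVAL/SETVAL pass an immediate value, so only a
      * byte swap of the union is needed; GETALL/SETALL and
      * IPC_STAT/IPC_SET/SEM_STAT convert their indirect arguments in both
      * directions around the host semctl() call.
      */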
3929 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3930                                  abi_ulong target_arg)
3931 {
3932     union target_semun target_su = { .buf = target_arg };
3933     union semun arg;
3934     struct semid_ds dsarg;
3935     unsigned short *array = NULL;
3936     struct seminfo seminfo;
3937     abi_long ret = -TARGET_EINVAL;
3938     abi_long err;
3939     cmd &= 0xff;
3940 
3941     switch( cmd ) {
3942 	case GETVAL:
3943 	case SETVAL:
3944             /* In 64-bit cross-endian situations, we will erroneously pick up
3945              * the wrong half of the union for the "val" element.  To rectify
3946              * this, the entire 8-byte structure is byteswapped, followed by
3947              * a swap of the 4-byte val field. In other cases, the data is
3948              * already in proper host byte order. */
3949 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3950 		target_su.buf = tswapal(target_su.buf);
3951 		arg.val = tswap32(target_su.val);
3952 	    } else {
3953 		arg.val = target_su.val;
3954 	    }
3955             ret = get_errno(semctl(semid, semnum, cmd, arg));
3956             break;
3957 	case GETALL:
3958 	case SETALL:
3959             err = target_to_host_semarray(semid, &array, target_su.array);
3960             if (err)
3961                 return err;
3962             arg.array = array;
3963             ret = get_errno(semctl(semid, semnum, cmd, arg));
3964             err = host_to_target_semarray(semid, target_su.array, &array);
3965             if (err)
3966                 return err;
3967             break;
3968 	case IPC_STAT:
3969 	case IPC_SET:
3970 	case SEM_STAT:
3971             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3972             if (err)
3973                 return err;
3974             arg.buf = &dsarg;
3975             ret = get_errno(semctl(semid, semnum, cmd, arg));
3976             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3977             if (err)
3978                 return err;
3979             break;
3980 	case IPC_INFO:
3981 	case SEM_INFO:
3982             arg.__buf = &seminfo;
3983             ret = get_errno(semctl(semid, semnum, cmd, arg));
3984             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3985             if (err)
3986                 return err;
3987             break;
3988 	case IPC_RMID:
3989 	case GETPID:
3990 	case GETNCNT:
3991 	case GETZCNT:
3992             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3993             break;
3994     }
3995 
3996     return ret;
3997 }
3998 
3999 struct target_sembuf {
4000     unsigned short sem_num;
4001     short sem_op;
4002     short sem_flg;
4003 };
4004 
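     /*
      * Copy an array of 'nsops' sembuf operations from guest memory,
      * byte-swapping each field into host order.
      */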
4005 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4006                                              abi_ulong target_addr,
4007                                              unsigned nsops)
4008 {
4009     struct target_sembuf *target_sembuf;
4010     int i;
4011 
4012     target_sembuf = lock_user(VERIFY_READ, target_addr,
4013                               nsops*sizeof(struct target_sembuf), 1);
4014     if (!target_sembuf)
4015         return -TARGET_EFAULT;
4016 
4017     for(i=0; i<nsops; i++) {
4018         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4019         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4020         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4021     }
4022 
4023     unlock_user(target_sembuf, target_addr, 0);
4024 
4025     return 0;
4026 }
4027 
4028 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4029     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4030 
4031 /*
4032  * This macro is required to handle the s390 variants, which pass the
4033  * arguments in a different order than the default.
4034  */
4035 #ifdef __s390x__
4036 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4037   (__nsops), (__timeout), (__sops)
4038 #else
4039 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4040   (__nsops), 0, (__sops), (__timeout)
4041 #endif
4042 
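     /*
      * Common implementation for semop(), semtimedop() and the ipc()
      * multiplexer.  'timeout' is a guest pointer (0 for plain semop) and
      * 'time64' selects the 64-bit timespec layout.  Falls back to the
      * __NR_ipc multiplexer when the host lacks __NR_semtimedop.
      */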
4043 static inline abi_long do_semtimedop(int semid,
4044                                      abi_long ptr,
4045                                      unsigned nsops,
4046                                      abi_long timeout, bool time64)
4047 {
4048     struct sembuf *sops;
4049     struct timespec ts, *pts = NULL;
4050     abi_long ret;
4051 
4052     if (timeout) {
4053         pts = &ts;
4054         if (time64) {
4055             if (target_to_host_timespec64(pts, timeout)) {
4056                 return -TARGET_EFAULT;
4057             }
4058         } else {
4059             if (target_to_host_timespec(pts, timeout)) {
4060                 return -TARGET_EFAULT;
4061             }
4062         }
4063     }
4064 
4065     if (nsops > TARGET_SEMOPM) {
4066         return -TARGET_E2BIG;
4067     }
4068 
4069     sops = g_new(struct sembuf, nsops);
4070 
4071     if (target_to_host_sembuf(sops, ptr, nsops)) {
4072         g_free(sops);
4073         return -TARGET_EFAULT;
4074     }
4075 
4076     ret = -TARGET_ENOSYS;
4077 #ifdef __NR_semtimedop
4078     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4079 #endif
4080 #ifdef __NR_ipc
4081     if (ret == -TARGET_ENOSYS) {
4082         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4083                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4084     }
4085 #endif
4086     g_free(sops);
4087     return ret;
4088 }
4089 #endif
4090 
4091 struct target_msqid_ds
4092 {
4093     struct target_ipc_perm msg_perm;
4094     abi_ulong msg_stime;
4095 #if TARGET_ABI_BITS == 32
4096     abi_ulong __unused1;
4097 #endif
4098     abi_ulong msg_rtime;
4099 #if TARGET_ABI_BITS == 32
4100     abi_ulong __unused2;
4101 #endif
4102     abi_ulong msg_ctime;
4103 #if TARGET_ABI_BITS == 32
4104     abi_ulong __unused3;
4105 #endif
4106     abi_ulong __msg_cbytes;
4107     abi_ulong msg_qnum;
4108     abi_ulong msg_qbytes;
4109     abi_ulong msg_lspid;
4110     abi_ulong msg_lrpid;
4111     abi_ulong __unused4;
4112     abi_ulong __unused5;
4113 };
4114 
4115 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4116                                                abi_ulong target_addr)
4117 {
4118     struct target_msqid_ds *target_md;
4119 
4120     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4121         return -TARGET_EFAULT;
4122     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4123         return -TARGET_EFAULT;
4124     host_md->msg_stime = tswapal(target_md->msg_stime);
4125     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4126     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4127     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4128     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4129     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4130     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4131     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4132     unlock_user_struct(target_md, target_addr, 0);
4133     return 0;
4134 }
4135 
4136 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4137                                                struct msqid_ds *host_md)
4138 {
4139     struct target_msqid_ds *target_md;
4140 
4141     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4142         return -TARGET_EFAULT;
4143     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4144         return -TARGET_EFAULT;
4145     target_md->msg_stime = tswapal(host_md->msg_stime);
4146     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4147     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4148     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4149     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4150     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4151     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4152     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4153     unlock_user_struct(target_md, target_addr, 1);
4154     return 0;
4155 }
4156 
4157 struct target_msginfo {
4158     int msgpool;
4159     int msgmap;
4160     int msgmax;
4161     int msgmnb;
4162     int msgmni;
4163     int msgssz;
4164     int msgtql;
4165     unsigned short int msgseg;
4166 };
4167 
4168 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4169                                               struct msginfo *host_msginfo)
4170 {
4171     struct target_msginfo *target_msginfo;
4172     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4173         return -TARGET_EFAULT;
4174     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4175     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4176     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4177     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4178     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4179     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4180     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4181     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4182     unlock_user_struct(target_msginfo, target_addr, 1);
4183     return 0;
4184 }
4185 
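     /*
      * Emulate msgctl(), converting msqid_ds/msginfo between guest and host
      * layouts as required by the individual commands.
      */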
4186 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4187 {
4188     struct msqid_ds dsarg;
4189     struct msginfo msginfo;
4190     abi_long ret = -TARGET_EINVAL;
4191 
4192     cmd &= 0xff;
4193 
4194     switch (cmd) {
4195     case IPC_STAT:
4196     case IPC_SET:
4197     case MSG_STAT:
4198         if (target_to_host_msqid_ds(&dsarg,ptr))
4199             return -TARGET_EFAULT;
4200         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4201         if (host_to_target_msqid_ds(ptr,&dsarg))
4202             return -TARGET_EFAULT;
4203         break;
4204     case IPC_RMID:
4205         ret = get_errno(msgctl(msgid, cmd, NULL));
4206         break;
4207     case IPC_INFO:
4208     case MSG_INFO:
4209         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4210         if (host_to_target_msginfo(ptr, &msginfo))
4211             return -TARGET_EFAULT;
4212         break;
4213     }
4214 
4215     return ret;
4216 }
4217 
4218 struct target_msgbuf {
4219     abi_long mtype;
4220     char	mtext[1];
4221 };
4222 
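     /*
      * Emulate msgsnd().  The guest msgbuf is copied into a host-allocated
      * buffer because the mtype field may differ in size between the two
      * ABIs; the real work is done by either __NR_msgsnd or the __NR_ipc
      * multiplexer, whichever the host provides.
      */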
4223 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4224                                  ssize_t msgsz, int msgflg)
4225 {
4226     struct target_msgbuf *target_mb;
4227     struct msgbuf *host_mb;
4228     abi_long ret = 0;
4229 
4230     if (msgsz < 0) {
4231         return -TARGET_EINVAL;
4232     }
4233 
4234     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4235         return -TARGET_EFAULT;
4236     host_mb = g_try_malloc(msgsz + sizeof(long));
4237     if (!host_mb) {
4238         unlock_user_struct(target_mb, msgp, 0);
4239         return -TARGET_ENOMEM;
4240     }
4241     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4242     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4243     ret = -TARGET_ENOSYS;
4244 #ifdef __NR_msgsnd
4245     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4246 #endif
4247 #ifdef __NR_ipc
4248     if (ret == -TARGET_ENOSYS) {
4249 #ifdef __s390x__
4250         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4251                                  host_mb));
4252 #else
4253         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4254                                  host_mb, 0));
4255 #endif
4256     }
4257 #endif
4258     g_free(host_mb);
4259     unlock_user_struct(target_mb, msgp, 0);
4260 
4261     return ret;
4262 }
4263 
4264 #ifdef __NR_ipc
4265 #if defined(__sparc__)
4266 /* For msgrcv, SPARC does not use the kludge on the final 2 arguments.  */
4267 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4268 #elif defined(__s390x__)
4269 /* The s390 sys_ipc variant has only five parameters.  */
4270 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4271     ((long int[]){(long int)__msgp, __msgtyp})
4272 #else
4273 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4274     ((long int[]){(long int)__msgp, __msgtyp}), 0
4275 #endif
4276 #endif
4277 
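     /*
      * Emulate msgrcv().  As with msgsnd(), the message is received into a
      * host buffer first and then copied back to the guest, with the mtype
      * field byte-swapped separately.
      */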
4278 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4279                                  ssize_t msgsz, abi_long msgtyp,
4280                                  int msgflg)
4281 {
4282     struct target_msgbuf *target_mb;
4283     char *target_mtext;
4284     struct msgbuf *host_mb;
4285     abi_long ret = 0;
4286 
4287     if (msgsz < 0) {
4288         return -TARGET_EINVAL;
4289     }
4290 
4291     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4292         return -TARGET_EFAULT;
4293 
4294     host_mb = g_try_malloc(msgsz + sizeof(long));
4295     if (!host_mb) {
4296         ret = -TARGET_ENOMEM;
4297         goto end;
4298     }
4299     ret = -TARGET_ENOSYS;
4300 #ifdef __NR_msgrcv
4301     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4302 #endif
4303 #ifdef __NR_ipc
4304     if (ret == -TARGET_ENOSYS) {
4305         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4306                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4307     }
4308 #endif
4309 
4310     if (ret > 0) {
4311         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4312         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4313         if (!target_mtext) {
4314             ret = -TARGET_EFAULT;
4315             goto end;
4316         }
4317         memcpy(target_mb->mtext, host_mb->mtext, ret);
4318         unlock_user(target_mtext, target_mtext_addr, ret);
4319     }
4320 
4321     target_mb->mtype = tswapal(host_mb->mtype);
4322 
4323 end:
4324     if (target_mb)
4325         unlock_user_struct(target_mb, msgp, 1);
4326     g_free(host_mb);
4327     return ret;
4328 }
4329 
4330 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4331                                                abi_ulong target_addr)
4332 {
4333     struct target_shmid_ds *target_sd;
4334 
4335     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4336         return -TARGET_EFAULT;
4337     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4338         return -TARGET_EFAULT;
4339     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4340     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4341     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4342     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4343     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4344     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4345     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4346     unlock_user_struct(target_sd, target_addr, 0);
4347     return 0;
4348 }
4349 
4350 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4351                                                struct shmid_ds *host_sd)
4352 {
4353     struct target_shmid_ds *target_sd;
4354 
4355     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4356         return -TARGET_EFAULT;
4357     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4358         return -TARGET_EFAULT;
4359     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4360     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4361     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4362     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4363     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4364     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4365     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4366     unlock_user_struct(target_sd, target_addr, 1);
4367     return 0;
4368 }
4369 
4370 struct  target_shminfo {
4371     abi_ulong shmmax;
4372     abi_ulong shmmin;
4373     abi_ulong shmmni;
4374     abi_ulong shmseg;
4375     abi_ulong shmall;
4376 };
4377 
4378 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4379                                               struct shminfo *host_shminfo)
4380 {
4381     struct target_shminfo *target_shminfo;
4382     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4383         return -TARGET_EFAULT;
4384     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4385     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4386     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4387     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4388     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4389     unlock_user_struct(target_shminfo, target_addr, 1);
4390     return 0;
4391 }
4392 
4393 struct target_shm_info {
4394     int used_ids;
4395     abi_ulong shm_tot;
4396     abi_ulong shm_rss;
4397     abi_ulong shm_swp;
4398     abi_ulong swap_attempts;
4399     abi_ulong swap_successes;
4400 };
4401 
4402 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4403                                                struct shm_info *host_shm_info)
4404 {
4405     struct target_shm_info *target_shm_info;
4406     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4407         return -TARGET_EFAULT;
4408     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4409     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4410     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4411     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4412     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4413     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4414     unlock_user_struct(target_shm_info, target_addr, 1);
4415     return 0;
4416 }
4417 
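     /*
      * Emulate shmctl(), converting shmid_ds, shminfo and shm_info
      * structures between guest and host layouts as needed.
      */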
4418 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4419 {
4420     struct shmid_ds dsarg;
4421     struct shminfo shminfo;
4422     struct shm_info shm_info;
4423     abi_long ret = -TARGET_EINVAL;
4424 
4425     cmd &= 0xff;
4426 
4427     switch(cmd) {
4428     case IPC_STAT:
4429     case IPC_SET:
4430     case SHM_STAT:
4431         if (target_to_host_shmid_ds(&dsarg, buf))
4432             return -TARGET_EFAULT;
4433         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4434         if (host_to_target_shmid_ds(buf, &dsarg))
4435             return -TARGET_EFAULT;
4436         break;
4437     case IPC_INFO:
4438         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4439         if (host_to_target_shminfo(buf, &shminfo))
4440             return -TARGET_EFAULT;
4441         break;
4442     case SHM_INFO:
4443         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4444         if (host_to_target_shm_info(buf, &shm_info))
4445             return -TARGET_EFAULT;
4446         break;
4447     case IPC_RMID:
4448     case SHM_LOCK:
4449     case SHM_UNLOCK:
4450         ret = get_errno(shmctl(shmid, cmd, NULL));
4451         break;
4452     }
4453 
4454     return ret;
4455 }
4456 
4457 #ifndef TARGET_FORCE_SHMLBA
4458 /* For most architectures, SHMLBA is the same as the page size;
4459  * some architectures have larger values, in which case they should
4460  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4461  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4462  * and defining its own value for SHMLBA.
4463  *
4464  * The kernel also permits SHMLBA to be set by the architecture to a
4465  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4466  * this means that addresses are rounded to the large size if
4467  * SHM_RND is set but addresses not aligned to that size are not rejected
4468  * as long as they are at least page-aligned. Since the only architecture
4469  * which uses this is ia64, this code doesn't provide for that oddity.
4470  */
4471 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4472 {
4473     return TARGET_PAGE_SIZE;
4474 }
4475 #endif
4476 
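     /*
      * Emulate shmat().  The attach address is validated against the target
      * SHMLBA, the segment is mapped via the host shmat(), and the guest
      * page flags plus the shm_regions[] table are updated so that a later
      * shmdt() can undo the mapping.
      */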
4477 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4478                                  int shmid, abi_ulong shmaddr, int shmflg)
4479 {
4480     CPUState *cpu = env_cpu(cpu_env);
4481     abi_long raddr;
4482     void *host_raddr;
4483     struct shmid_ds shm_info;
4484     int i,ret;
4485     abi_ulong shmlba;
4486 
4487     /* shmat pointers are always untagged */
4488 
4489     /* find out the length of the shared memory segment */
4490     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4491     if (is_error(ret)) {
4492         /* can't get length, bail out */
4493         return ret;
4494     }
4495 
4496     shmlba = target_shmlba(cpu_env);
4497 
4498     if (shmaddr & (shmlba - 1)) {
4499         if (shmflg & SHM_RND) {
4500             shmaddr &= ~(shmlba - 1);
4501         } else {
4502             return -TARGET_EINVAL;
4503         }
4504     }
4505     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4506         return -TARGET_EINVAL;
4507     }
4508 
4509     mmap_lock();
4510 
4511     /*
4512      * We're mapping shared memory, so ensure we generate code for parallel
4513      * execution and flush old translations.  This will work up to the level
4514      * supported by the host -- anything that requires EXCP_ATOMIC will not
4515      * be atomic with respect to an external process.
4516      */
4517     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4518         cpu->tcg_cflags |= CF_PARALLEL;
4519         tb_flush(cpu);
4520     }
4521 
4522     if (shmaddr)
4523         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4524     else {
4525         abi_ulong mmap_start;
4526 
4527         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4528         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4529 
4530         if (mmap_start == -1) {
4531             errno = ENOMEM;
4532             host_raddr = (void *)-1;
4533         } else
4534             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4535                                shmflg | SHM_REMAP);
4536     }
4537 
4538     if (host_raddr == (void *)-1) {
4539         mmap_unlock();
4540         return get_errno((long)host_raddr);
4541     }
4542     raddr=h2g((unsigned long)host_raddr);
4543 
4544     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4545                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4546                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4547 
4548     for (i = 0; i < N_SHM_REGIONS; i++) {
4549         if (!shm_regions[i].in_use) {
4550             shm_regions[i].in_use = true;
4551             shm_regions[i].start = raddr;
4552             shm_regions[i].size = shm_info.shm_segsz;
4553             break;
4554         }
4555     }
4556 
4557     mmap_unlock();
4558     return raddr;
4559 
4560 }
4561 
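     /*
      * Emulate shmdt(): drop the bookkeeping entry and page flags for the
      * region, then detach it with the host shmdt().
      */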
4562 static inline abi_long do_shmdt(abi_ulong shmaddr)
4563 {
4564     int i;
4565     abi_long rv;
4566 
4567     /* shmdt pointers are always untagged */
4568 
4569     mmap_lock();
4570 
4571     for (i = 0; i < N_SHM_REGIONS; ++i) {
4572         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4573             shm_regions[i].in_use = false;
4574             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4575             break;
4576         }
4577     }
4578     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4579 
4580     mmap_unlock();
4581 
4582     return rv;
4583 }
4584 
4585 #ifdef TARGET_NR_ipc
4586 /* ??? This only works with linear mappings.  */
4587 /* do_ipc() must return target values and target errnos. */
4588 static abi_long do_ipc(CPUArchState *cpu_env,
4589                        unsigned int call, abi_long first,
4590                        abi_long second, abi_long third,
4591                        abi_long ptr, abi_long fifth)
4592 {
4593     int version;
4594     abi_long ret = 0;
4595 
4596     version = call >> 16;
4597     call &= 0xffff;
4598 
4599     switch (call) {
4600     case IPCOP_semop:
4601         ret = do_semtimedop(first, ptr, second, 0, false);
4602         break;
4603     case IPCOP_semtimedop:
4604     /*
4605      * The s390 sys_ipc variant has only five parameters instead of six
4606      * (as in the default variant); the only difference is the handling of
4607      * SEMTIMEDOP, where on s390 the third parameter is used as the pointer
4608      * to a struct timespec while the generic variant uses the fifth parameter.
4609      */
4610 #if defined(TARGET_S390X)
4611         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4612 #else
4613         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4614 #endif
4615         break;
4616 
4617     case IPCOP_semget:
4618         ret = get_errno(semget(first, second, third));
4619         break;
4620 
4621     case IPCOP_semctl: {
4622         /* The semun argument to semctl is passed by value, so dereference the
4623          * ptr argument. */
4624         abi_ulong atptr;
4625         get_user_ual(atptr, ptr);
4626         ret = do_semctl(first, second, third, atptr);
4627         break;
4628     }
4629 
4630     case IPCOP_msgget:
4631         ret = get_errno(msgget(first, second));
4632         break;
4633 
4634     case IPCOP_msgsnd:
4635         ret = do_msgsnd(first, ptr, second, third);
4636         break;
4637 
4638     case IPCOP_msgctl:
4639         ret = do_msgctl(first, second, ptr);
4640         break;
4641 
4642     case IPCOP_msgrcv:
4643         switch (version) {
4644         case 0:
4645             {
4646                 struct target_ipc_kludge {
4647                     abi_long msgp;
4648                     abi_long msgtyp;
4649                 } *tmp;
4650 
4651                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4652                     ret = -TARGET_EFAULT;
4653                     break;
4654                 }
4655 
4656                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4657 
4658                 unlock_user_struct(tmp, ptr, 0);
4659                 break;
4660             }
4661         default:
4662             ret = do_msgrcv(first, ptr, second, fifth, third);
4663         }
4664         break;
4665 
4666     case IPCOP_shmat:
4667         switch (version) {
4668         default:
4669         {
4670             abi_ulong raddr;
4671             raddr = do_shmat(cpu_env, first, ptr, second);
4672             if (is_error(raddr))
4673                 return get_errno(raddr);
4674             if (put_user_ual(raddr, third))
4675                 return -TARGET_EFAULT;
4676             break;
4677         }
4678         case 1:
4679             ret = -TARGET_EINVAL;
4680             break;
4681         }
4682         break;
4683     case IPCOP_shmdt:
4684         ret = do_shmdt(ptr);
4685         break;
4686
4687     case IPCOP_shmget:
4688         /* IPC_* flag values are the same on all Linux platforms */
4689         ret = get_errno(shmget(first, second, third));
4690         break;
4691
4692     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4693     case IPCOP_shmctl:
4694         ret = do_shmctl(first, second, ptr);
4695         break;
4696     default:
4697         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4698                       call, version);
4699         ret = -TARGET_ENOSYS;
4700         break;
4701     }
4702     return ret;
4703 }
4704 #endif
4705 
4706 /* kernel structure types definitions */
4707 
4708 #define STRUCT(name, ...) STRUCT_ ## name,
4709 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4710 enum {
4711 #include "syscall_types.h"
4712 STRUCT_MAX
4713 };
4714 #undef STRUCT
4715 #undef STRUCT_SPECIAL
4716 
4717 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4718 #define STRUCT_SPECIAL(name)
4719 #include "syscall_types.h"
4720 #undef STRUCT
4721 #undef STRUCT_SPECIAL
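/*
 * syscall_types.h is included twice: the first pass (above) turns every
 * STRUCT()/STRUCT_SPECIAL() line into a STRUCT_<name> enumerator, and the
 * second pass expands each STRUCT() into a TYPE_NULL-terminated argtype
 * array describing its fields (STRUCT_SPECIAL() expands to nothing in the
 * second pass; such structures get hand-written converters, e.g. the
 * termios handling further below).  As an illustration only, a hypothetical
 * entry such as
 *
 *     STRUCT(winsize, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT)
 *
 * would contribute STRUCT_winsize to the enum and generate
 *
 *     static const argtype struct_winsize_def[] = {
 *         TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_SHORT, TYPE_NULL
 *     };
 */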
4722 
4723 #define MAX_STRUCT_SIZE 4096
4724 
4725 #ifdef CONFIG_FIEMAP
4726 /* Cap the number of extents so that fiemap access checks don't
4727  * overflow on 32-bit systems.  This is very slightly smaller than
4728  * the limit imposed by the underlying kernel.
4729  */
4730 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4731                             / sizeof(struct fiemap_extent))
4732 
4733 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4734                                        int fd, int cmd, abi_long arg)
4735 {
4736     /* The parameter for this ioctl is a struct fiemap followed
4737      * by an array of struct fiemap_extent whose size is set
4738      * in fiemap->fm_extent_count. The array is filled in by the
4739      * ioctl.
4740      */
4741     int target_size_in, target_size_out;
4742     struct fiemap *fm;
4743     const argtype *arg_type = ie->arg_type;
4744     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4745     void *argptr, *p;
4746     abi_long ret;
4747     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4748     uint32_t outbufsz;
4749     int free_fm = 0;
4750 
4751     assert(arg_type[0] == TYPE_PTR);
4752     assert(ie->access == IOC_RW);
4753     arg_type++;
4754     target_size_in = thunk_type_size(arg_type, 0);
4755     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4756     if (!argptr) {
4757         return -TARGET_EFAULT;
4758     }
4759     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4760     unlock_user(argptr, arg, 0);
4761     fm = (struct fiemap *)buf_temp;
4762     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4763         return -TARGET_EINVAL;
4764     }
4765 
4766     outbufsz = sizeof (*fm) +
4767         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4768 
4769     if (outbufsz > MAX_STRUCT_SIZE) {
4770         /* We can't fit all the extents into the fixed size buffer.
4771          * Allocate one that is large enough and use it instead.
4772          */
4773         fm = g_try_malloc(outbufsz);
4774         if (!fm) {
4775             return -TARGET_ENOMEM;
4776         }
4777         memcpy(fm, buf_temp, sizeof(struct fiemap));
4778         free_fm = 1;
4779     }
4780     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4781     if (!is_error(ret)) {
4782         target_size_out = target_size_in;
4783         /* An extent_count of 0 means we were only counting the extents
4784          * so there are no structs to copy
4785          */
4786         if (fm->fm_extent_count != 0) {
4787             target_size_out += fm->fm_mapped_extents * extent_size;
4788         }
4789         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4790         if (!argptr) {
4791             ret = -TARGET_EFAULT;
4792         } else {
4793             /* Convert the struct fiemap */
4794             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4795             if (fm->fm_extent_count != 0) {
4796                 p = argptr + target_size_in;
4797                 /* ...and then all the struct fiemap_extents */
4798                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4799                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4800                                   THUNK_TARGET);
4801                     p += extent_size;
4802                 }
4803             }
4804             unlock_user(argptr, arg, target_size_out);
4805         }
4806     }
4807     if (free_fm) {
4808         g_free(fm);
4809     }
4810     return ret;
4811 }
4812 #endif
4813 
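/*
 * SIOCGIFCONF-style requests: the target and host struct ifreq layouts can
 * differ in size, so ifc_len has to be rescaled in both directions.  The
 * guest's ifc_len is divided by the target ifreq size to get the entry
 * count, the host ioctl is run on a host-sized buffer, and the result is
 * converted back entry by entry before ifc_len is expressed in target
 * units again.
 */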
4814 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4815                                 int fd, int cmd, abi_long arg)
4816 {
4817     const argtype *arg_type = ie->arg_type;
4818     int target_size;
4819     void *argptr;
4820     int ret;
4821     struct ifconf *host_ifconf;
4822     uint32_t outbufsz;
4823     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4824     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4825     int target_ifreq_size;
4826     int nb_ifreq;
4827     int free_buf = 0;
4828     int i;
4829     int target_ifc_len;
4830     abi_long target_ifc_buf;
4831     int host_ifc_len;
4832     char *host_ifc_buf;
4833 
4834     assert(arg_type[0] == TYPE_PTR);
4835     assert(ie->access == IOC_RW);
4836 
4837     arg_type++;
4838     target_size = thunk_type_size(arg_type, 0);
4839 
4840     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4841     if (!argptr)
4842         return -TARGET_EFAULT;
4843     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4844     unlock_user(argptr, arg, 0);
4845 
4846     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4847     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4848     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4849 
4850     if (target_ifc_buf != 0) {
4851         target_ifc_len = host_ifconf->ifc_len;
4852         nb_ifreq = target_ifc_len / target_ifreq_size;
4853         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4854 
4855         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4856         if (outbufsz > MAX_STRUCT_SIZE) {
4857             /*
4858              * We can't fit all the ifreq entries into the fixed size buffer.
4859              * Allocate one that is large enough and use it instead.
4860              */
4861             host_ifconf = g_try_malloc(outbufsz);
4862             if (!host_ifconf) {
4863                 return -TARGET_ENOMEM;
4864             }
4865             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4866             free_buf = 1;
4867         }
4868         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4869 
4870         host_ifconf->ifc_len = host_ifc_len;
4871     } else {
4872         host_ifc_buf = NULL;
4873     }
4874     host_ifconf->ifc_buf = host_ifc_buf;
4875 
4876     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4877     if (!is_error(ret)) {
4878         /* convert host ifc_len to target ifc_len */
4879 
4880         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4881         target_ifc_len = nb_ifreq * target_ifreq_size;
4882         host_ifconf->ifc_len = target_ifc_len;
4883 
4884         /* restore target ifc_buf */
4885 
4886         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4887 
4888         /* copy struct ifconf to target user */
4889 
4890         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4891         if (!argptr)
4892             return -TARGET_EFAULT;
4893         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4894         unlock_user(argptr, arg, target_size);
4895 
4896         if (target_ifc_buf != 0) {
4897             /* copy ifreq[] to target user */
4898             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4899             for (i = 0; i < nb_ifreq ; i++) {
4900                 thunk_convert(argptr + i * target_ifreq_size,
4901                               host_ifc_buf + i * sizeof(struct ifreq),
4902                               ifreq_arg_type, THUNK_TARGET);
4903             }
4904             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4905         }
4906     }
4907 
4908     if (free_buf) {
4909         g_free(host_ifconf);
4910     }
4911 
4912     return ret;
4913 }
4914 
4915 #if defined(CONFIG_USBFS)
4916 #if HOST_LONG_BITS > 64
4917 #error USBDEVFS thunks do not support >64 bit hosts yet.
4918 #endif
4919 struct live_urb {
4920     uint64_t target_urb_adr;
4921     uint64_t target_buf_adr;
4922     char *target_buf_ptr;
4923     struct usbdevfs_urb host_urb;
4924 };
4925 
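/*
 * The URB hash table is keyed by the guest address of the URB: since
 * target_urb_adr is the first member of struct live_urb, a live_urb
 * pointer doubles as a pointer to a 64-bit key for g_int64_hash /
 * g_int64_equal, and lookups can simply pass the address of a uint64_t
 * holding the guest URB address.
 */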
4926 static GHashTable *usbdevfs_urb_hashtable(void)
4927 {
4928     static GHashTable *urb_hashtable;
4929 
4930     if (!urb_hashtable) {
4931         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4932     }
4933     return urb_hashtable;
4934 }
4935 
4936 static void urb_hashtable_insert(struct live_urb *urb)
4937 {
4938     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4939     g_hash_table_insert(urb_hashtable, urb, urb);
4940 }
4941 
4942 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4943 {
4944     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4945     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4946 }
4947 
4948 static void urb_hashtable_remove(struct live_urb *urb)
4949 {
4950     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4951     g_hash_table_remove(urb_hashtable, urb);
4952 }
4953 
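/*
 * USBDEVFS_REAPURB: the kernel returns the host address of the
 * usbdevfs_urb that was submitted; subtracting offsetof(struct live_urb,
 * host_urb) recovers the wrapping live_urb so the guest buffer can be
 * unlocked and the guest's original URB pointer written back.
 */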
4954 static abi_long
4955 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4956                           int fd, int cmd, abi_long arg)
4957 {
4958     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4959     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4960     struct live_urb *lurb;
4961     void *argptr;
4962     uint64_t hurb;
4963     int target_size;
4964     uintptr_t target_urb_adr;
4965     abi_long ret;
4966 
4967     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4968 
4969     memset(buf_temp, 0, sizeof(uint64_t));
4970     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4971     if (is_error(ret)) {
4972         return ret;
4973     }
4974 
4975     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4976     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4977     if (!lurb->target_urb_adr) {
4978         return -TARGET_EFAULT;
4979     }
4980     urb_hashtable_remove(lurb);
4981     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4982         lurb->host_urb.buffer_length);
4983     lurb->target_buf_ptr = NULL;
4984 
4985     /* restore the guest buffer pointer */
4986     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4987 
4988     /* update the guest urb struct */
4989     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4990     if (!argptr) {
4991         g_free(lurb);
4992         return -TARGET_EFAULT;
4993     }
4994     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4995     unlock_user(argptr, lurb->target_urb_adr, target_size);
4996 
4997     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4998     /* write back the urb handle */
4999     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5000     if (!argptr) {
5001         g_free(lurb);
5002         return -TARGET_EFAULT;
5003     }
5004 
5005     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5006     target_urb_adr = lurb->target_urb_adr;
5007     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5008     unlock_user(argptr, arg, target_size);
5009 
5010     g_free(lurb);
5011     return ret;
5012 }
5013 
5014 static abi_long
5015 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5016                              uint8_t *buf_temp __attribute__((unused)),
5017                              int fd, int cmd, abi_long arg)
5018 {
5019     struct live_urb *lurb;
5020 
5021     /* map target address back to host URB with metadata. */
5022     lurb = urb_hashtable_lookup(arg);
5023     if (!lurb) {
5024         return -TARGET_EFAULT;
5025     }
5026     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5027 }
5028 
5029 static abi_long
5030 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5031                             int fd, int cmd, abi_long arg)
5032 {
5033     const argtype *arg_type = ie->arg_type;
5034     int target_size;
5035     abi_long ret;
5036     void *argptr;
5037     int rw_dir;
5038     struct live_urb *lurb;
5039 
5040     /*
5041      * Each submitted URB needs to map to a unique ID for the
5042      * kernel, and that unique ID needs to be a pointer to
5043      * host memory.  Hence, we need to malloc for each URB.
5044      * Isochronous transfers have a variable-length struct.
5045      */
5046     arg_type++;
5047     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5048 
5049     /* construct host copy of urb and metadata */
5050     lurb = g_try_malloc0(sizeof(struct live_urb));
5051     if (!lurb) {
5052         return -TARGET_ENOMEM;
5053     }
5054 
5055     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5056     if (!argptr) {
5057         g_free(lurb);
5058         return -TARGET_EFAULT;
5059     }
5060     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5061     unlock_user(argptr, arg, 0);
5062 
5063     lurb->target_urb_adr = arg;
5064     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5065 
5066     /* Buffer space used depends on the endpoint type, so lock the entire buffer. */
5067     /* Control-type URBs should check the buffer contents for the true direction. */
5068     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5069     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5070         lurb->host_urb.buffer_length, 1);
5071     if (lurb->target_buf_ptr == NULL) {
5072         g_free(lurb);
5073         return -TARGET_EFAULT;
5074     }
5075 
5076     /* update buffer pointer in host copy */
5077     lurb->host_urb.buffer = lurb->target_buf_ptr;
5078 
5079     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5080     if (is_error(ret)) {
5081         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5082         g_free(lurb);
5083     } else {
5084         urb_hashtable_insert(lurb);
5085     }
5086 
5087     return ret;
5088 }
5089 #endif /* CONFIG_USBFS */
5090 
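/*
 * Device-mapper ioctls carry a variable-sized payload behind the fixed
 * struct dm_ioctl header, described by its data_start/data_size fields.
 * The request is therefore staged in a buffer sized from data_size, and
 * the payload of each command is converted separately in both directions.
 */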
5091 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5092                             int cmd, abi_long arg)
5093 {
5094     void *argptr;
5095     struct dm_ioctl *host_dm;
5096     abi_long guest_data;
5097     uint32_t guest_data_size;
5098     int target_size;
5099     const argtype *arg_type = ie->arg_type;
5100     abi_long ret;
5101     void *big_buf = NULL;
5102     char *host_data;
5103 
5104     arg_type++;
5105     target_size = thunk_type_size(arg_type, 0);
5106     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5107     if (!argptr) {
5108         ret = -TARGET_EFAULT;
5109         goto out;
5110     }
5111     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5112     unlock_user(argptr, arg, 0);
5113 
5114     /* buf_temp is too small, so fetch things into a bigger buffer */
5115     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5116     memcpy(big_buf, buf_temp, target_size);
5117     buf_temp = big_buf;
5118     host_dm = big_buf;
5119 
5120     guest_data = arg + host_dm->data_start;
5121     if ((guest_data - arg) < 0) {
5122         ret = -TARGET_EINVAL;
5123         goto out;
5124     }
5125     guest_data_size = host_dm->data_size - host_dm->data_start;
5126     host_data = (char*)host_dm + host_dm->data_start;
5127 
5128     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5129     if (!argptr) {
5130         ret = -TARGET_EFAULT;
5131         goto out;
5132     }
5133 
5134     switch (ie->host_cmd) {
5135     case DM_REMOVE_ALL:
5136     case DM_LIST_DEVICES:
5137     case DM_DEV_CREATE:
5138     case DM_DEV_REMOVE:
5139     case DM_DEV_SUSPEND:
5140     case DM_DEV_STATUS:
5141     case DM_DEV_WAIT:
5142     case DM_TABLE_STATUS:
5143     case DM_TABLE_CLEAR:
5144     case DM_TABLE_DEPS:
5145     case DM_LIST_VERSIONS:
5146         /* no input data */
5147         break;
5148     case DM_DEV_RENAME:
5149     case DM_DEV_SET_GEOMETRY:
5150         /* data contains only strings */
5151         memcpy(host_data, argptr, guest_data_size);
5152         break;
5153     case DM_TARGET_MSG:
5154         memcpy(host_data, argptr, guest_data_size);
5155         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5156         break;
5157     case DM_TABLE_LOAD:
5158     {
5159         void *gspec = argptr;
5160         void *cur_data = host_data;
5161         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5162         int spec_size = thunk_type_size(arg_type, 0);
5163         int i;
5164 
5165         for (i = 0; i < host_dm->target_count; i++) {
5166             struct dm_target_spec *spec = cur_data;
5167             uint32_t next;
5168             int slen;
5169 
5170             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5171             slen = strlen((char*)gspec + spec_size) + 1;
5172             next = spec->next;
5173             spec->next = sizeof(*spec) + slen;
5174             strcpy((char*)&spec[1], gspec + spec_size);
5175             gspec += next;
5176             cur_data += spec->next;
5177         }
5178         break;
5179     }
5180     default:
5181         ret = -TARGET_EINVAL;
5182         unlock_user(argptr, guest_data, 0);
5183         goto out;
5184     }
5185     unlock_user(argptr, guest_data, 0);
5186 
5187     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5188     if (!is_error(ret)) {
5189         guest_data = arg + host_dm->data_start;
5190         guest_data_size = host_dm->data_size - host_dm->data_start;
5191         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5192         switch (ie->host_cmd) {
5193         case DM_REMOVE_ALL:
5194         case DM_DEV_CREATE:
5195         case DM_DEV_REMOVE:
5196         case DM_DEV_RENAME:
5197         case DM_DEV_SUSPEND:
5198         case DM_DEV_STATUS:
5199         case DM_TABLE_LOAD:
5200         case DM_TABLE_CLEAR:
5201         case DM_TARGET_MSG:
5202         case DM_DEV_SET_GEOMETRY:
5203             /* no return data */
5204             break;
5205         case DM_LIST_DEVICES:
5206         {
5207             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5208             uint32_t remaining_data = guest_data_size;
5209             void *cur_data = argptr;
5210             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5211             int nl_size = 12; /* can't use thunk_size due to alignment */
5212 
5213             while (1) {
5214                 uint32_t next = nl->next;
5215                 if (next) {
5216                     nl->next = nl_size + (strlen(nl->name) + 1);
5217                 }
5218                 if (remaining_data < nl->next) {
5219                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5220                     break;
5221                 }
5222                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5223                 strcpy(cur_data + nl_size, nl->name);
5224                 cur_data += nl->next;
5225                 remaining_data -= nl->next;
5226                 if (!next) {
5227                     break;
5228                 }
5229                 nl = (void*)nl + next;
5230             }
5231             break;
5232         }
5233         case DM_DEV_WAIT:
5234         case DM_TABLE_STATUS:
5235         {
5236             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5237             void *cur_data = argptr;
5238             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5239             int spec_size = thunk_type_size(arg_type, 0);
5240             int i;
5241 
5242             for (i = 0; i < host_dm->target_count; i++) {
5243                 uint32_t next = spec->next;
5244                 int slen = strlen((char*)&spec[1]) + 1;
5245                 spec->next = (cur_data - argptr) + spec_size + slen;
5246                 if (guest_data_size < spec->next) {
5247                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5248                     break;
5249                 }
5250                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5251                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5252                 cur_data = argptr + spec->next;
5253                 spec = (void*)host_dm + host_dm->data_start + next;
5254             }
5255             break;
5256         }
5257         case DM_TABLE_DEPS:
5258         {
5259             void *hdata = (void*)host_dm + host_dm->data_start;
5260             int count = *(uint32_t*)hdata;
5261             uint64_t *hdev = hdata + 8;
5262             uint64_t *gdev = argptr + 8;
5263             int i;
5264 
5265             *(uint32_t*)argptr = tswap32(count);
5266             for (i = 0; i < count; i++) {
5267                 *gdev = tswap64(*hdev);
5268                 gdev++;
5269                 hdev++;
5270             }
5271             break;
5272         }
5273         case DM_LIST_VERSIONS:
5274         {
5275             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5276             uint32_t remaining_data = guest_data_size;
5277             void *cur_data = argptr;
5278             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5279             int vers_size = thunk_type_size(arg_type, 0);
5280 
5281             while (1) {
5282                 uint32_t next = vers->next;
5283                 if (next) {
5284                     vers->next = vers_size + (strlen(vers->name) + 1);
5285                 }
5286                 if (remaining_data < vers->next) {
5287                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5288                     break;
5289                 }
5290                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5291                 strcpy(cur_data + vers_size, vers->name);
5292                 cur_data += vers->next;
5293                 remaining_data -= vers->next;
5294                 if (!next) {
5295                     break;
5296                 }
5297                 vers = (void*)vers + next;
5298             }
5299             break;
5300         }
5301         default:
5302             unlock_user(argptr, guest_data, 0);
5303             ret = -TARGET_EINVAL;
5304             goto out;
5305         }
5306         unlock_user(argptr, guest_data, guest_data_size);
5307 
5308         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5309         if (!argptr) {
5310             ret = -TARGET_EFAULT;
5311             goto out;
5312         }
5313         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5314         unlock_user(argptr, arg, target_size);
5315     }
5316 out:
5317     g_free(big_buf);
5318     return ret;
5319 }
5320 
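/*
 * BLKPG: the argument is a struct blkpg_ioctl_arg whose 'data' member
 * points to a struct blkpg_partition.  Both levels are converted, and the
 * host copy's data pointer is redirected to a local partition structure
 * before the ioctl is issued.
 */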
5321 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5322                                int cmd, abi_long arg)
5323 {
5324     void *argptr;
5325     int target_size;
5326     const argtype *arg_type = ie->arg_type;
5327     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5328     abi_long ret;
5329 
5330     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5331     struct blkpg_partition host_part;
5332 
5333     /* Read and convert blkpg */
5334     arg_type++;
5335     target_size = thunk_type_size(arg_type, 0);
5336     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5337     if (!argptr) {
5338         ret = -TARGET_EFAULT;
5339         goto out;
5340     }
5341     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5342     unlock_user(argptr, arg, 0);
5343 
5344     switch (host_blkpg->op) {
5345     case BLKPG_ADD_PARTITION:
5346     case BLKPG_DEL_PARTITION:
5347         /* payload is struct blkpg_partition */
5348         break;
5349     default:
5350         /* Unknown opcode */
5351         ret = -TARGET_EINVAL;
5352         goto out;
5353     }
5354 
5355     /* Read and convert blkpg->data */
5356     arg = (abi_long)(uintptr_t)host_blkpg->data;
5357     target_size = thunk_type_size(part_arg_type, 0);
5358     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5359     if (!argptr) {
5360         ret = -TARGET_EFAULT;
5361         goto out;
5362     }
5363     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5364     unlock_user(argptr, arg, 0);
5365 
5366     /* Swizzle the data pointer to our local copy and call! */
5367     host_blkpg->data = &host_part;
5368     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5369 
5370 out:
5371     return ret;
5372 }
5373 
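/*
 * Routing-table ioctls taking a struct rtentry: the structure is converted
 * field by field so that the rt_dev string pointer can be intercepted and
 * replaced with a locked host copy of the guest string before the ioctl is
 * issued, and unlocked again afterwards.
 */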
5374 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5375                                 int fd, int cmd, abi_long arg)
5376 {
5377     const argtype *arg_type = ie->arg_type;
5378     const StructEntry *se;
5379     const argtype *field_types;
5380     const int *dst_offsets, *src_offsets;
5381     int target_size;
5382     void *argptr;
5383     abi_ulong *target_rt_dev_ptr = NULL;
5384     unsigned long *host_rt_dev_ptr = NULL;
5385     abi_long ret;
5386     int i;
5387 
5388     assert(ie->access == IOC_W);
5389     assert(*arg_type == TYPE_PTR);
5390     arg_type++;
5391     assert(*arg_type == TYPE_STRUCT);
5392     target_size = thunk_type_size(arg_type, 0);
5393     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5394     if (!argptr) {
5395         return -TARGET_EFAULT;
5396     }
5397     arg_type++;
5398     assert(*arg_type == (int)STRUCT_rtentry);
5399     se = struct_entries + *arg_type++;
5400     assert(se->convert[0] == NULL);
5401     /* convert struct here to be able to catch rt_dev string */
5402     field_types = se->field_types;
5403     dst_offsets = se->field_offsets[THUNK_HOST];
5404     src_offsets = se->field_offsets[THUNK_TARGET];
5405     for (i = 0; i < se->nb_fields; i++) {
5406         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5407             assert(*field_types == TYPE_PTRVOID);
5408             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5409             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5410             if (*target_rt_dev_ptr != 0) {
5411                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5412                                                   tswapal(*target_rt_dev_ptr));
5413                 if (!*host_rt_dev_ptr) {
5414                     unlock_user(argptr, arg, 0);
5415                     return -TARGET_EFAULT;
5416                 }
5417             } else {
5418                 *host_rt_dev_ptr = 0;
5419             }
5420             field_types++;
5421             continue;
5422         }
5423         field_types = thunk_convert(buf_temp + dst_offsets[i],
5424                                     argptr + src_offsets[i],
5425                                     field_types, THUNK_HOST);
5426     }
5427     unlock_user(argptr, arg, 0);
5428 
5429     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5430 
5431     assert(host_rt_dev_ptr != NULL);
5432     assert(target_rt_dev_ptr != NULL);
5433     if (*host_rt_dev_ptr != 0) {
5434         unlock_user((void *)*host_rt_dev_ptr,
5435                     *target_rt_dev_ptr, 0);
5436     }
5437     return ret;
5438 }
5439 
5440 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5441                                      int fd, int cmd, abi_long arg)
5442 {
5443     int sig = target_to_host_signal(arg);
5444     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5445 }
5446 
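/*
 * SIOCGSTAMP/SIOCGSTAMPNS: the _OLD command numbers report the timestamp
 * in the target's native timeval/timespec layout, while the newer command
 * numbers always use the 64-bit time layout.
 */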
5447 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5448                                     int fd, int cmd, abi_long arg)
5449 {
5450     struct timeval tv;
5451     abi_long ret;
5452 
5453     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5454     if (is_error(ret)) {
5455         return ret;
5456     }
5457 
5458     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5459         if (copy_to_user_timeval(arg, &tv)) {
5460             return -TARGET_EFAULT;
5461         }
5462     } else {
5463         if (copy_to_user_timeval64(arg, &tv)) {
5464             return -TARGET_EFAULT;
5465         }
5466     }
5467 
5468     return ret;
5469 }
5470 
5471 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5472                                       int fd, int cmd, abi_long arg)
5473 {
5474     struct timespec ts;
5475     abi_long ret;
5476 
5477     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5478     if (is_error(ret)) {
5479         return ret;
5480     }
5481 
5482     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5483         if (host_to_target_timespec(arg, &ts)) {
5484             return -TARGET_EFAULT;
5485         }
5486     } else {
5487         if (host_to_target_timespec64(arg, &ts)) {
5488             return -TARGET_EFAULT;
5489         }
5490     }
5491 
5492     return ret;
5493 }
5494 
5495 #ifdef TIOCGPTPEER
5496 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5497                                      int fd, int cmd, abi_long arg)
5498 {
5499     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5500     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5501 }
5502 #endif
5503 
5504 #ifdef HAVE_DRM_H
5505 
5506 static void unlock_drm_version(struct drm_version *host_ver,
5507                                struct target_drm_version *target_ver,
5508                                bool copy)
5509 {
5510     unlock_user(host_ver->name, target_ver->name,
5511                                 copy ? host_ver->name_len : 0);
5512     unlock_user(host_ver->date, target_ver->date,
5513                                 copy ? host_ver->date_len : 0);
5514     unlock_user(host_ver->desc, target_ver->desc,
5515                                 copy ? host_ver->desc_len : 0);
5516 }
5517 
5518 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5519                                           struct target_drm_version *target_ver)
5520 {
5521     memset(host_ver, 0, sizeof(*host_ver));
5522 
5523     __get_user(host_ver->name_len, &target_ver->name_len);
5524     if (host_ver->name_len) {
5525         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5526                                    target_ver->name_len, 0);
5527         if (!host_ver->name) {
5528             return -EFAULT;
5529         }
5530     }
5531 
5532     __get_user(host_ver->date_len, &target_ver->date_len);
5533     if (host_ver->date_len) {
5534         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5535                                    target_ver->date_len, 0);
5536         if (!host_ver->date) {
5537             goto err;
5538         }
5539     }
5540 
5541     __get_user(host_ver->desc_len, &target_ver->desc_len);
5542     if (host_ver->desc_len) {
5543         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5544                                    target_ver->desc_len, 0);
5545         if (!host_ver->desc) {
5546             goto err;
5547         }
5548     }
5549 
5550     return 0;
5551 err:
5552     unlock_drm_version(host_ver, target_ver, false);
5553     return -EFAULT;
5554 }
5555 
5556 static inline void host_to_target_drmversion(
5557                                           struct target_drm_version *target_ver,
5558                                           struct drm_version *host_ver)
5559 {
5560     __put_user(host_ver->version_major, &target_ver->version_major);
5561     __put_user(host_ver->version_minor, &target_ver->version_minor);
5562     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5563     __put_user(host_ver->name_len, &target_ver->name_len);
5564     __put_user(host_ver->date_len, &target_ver->date_len);
5565     __put_user(host_ver->desc_len, &target_ver->desc_len);
5566     unlock_drm_version(host_ver, target_ver, true);
5567 }
5568 
5569 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5570                              int fd, int cmd, abi_long arg)
5571 {
5572     struct drm_version *ver;
5573     struct target_drm_version *target_ver;
5574     abi_long ret;
5575 
5576     switch (ie->host_cmd) {
5577     case DRM_IOCTL_VERSION:
5578         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5579             return -TARGET_EFAULT;
5580         }
5581         ver = (struct drm_version *)buf_temp;
5582         ret = target_to_host_drmversion(ver, target_ver);
5583         if (!is_error(ret)) {
5584             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5585             if (is_error(ret)) {
5586                 unlock_drm_version(ver, target_ver, false);
5587             } else {
5588                 host_to_target_drmversion(target_ver, ver);
5589             }
5590         }
5591         unlock_user_struct(target_ver, arg, 0);
5592         return ret;
5593     }
5594     return -TARGET_ENOSYS;
5595 }
5596 
5597 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5598                                            struct drm_i915_getparam *gparam,
5599                                            int fd, abi_long arg)
5600 {
5601     abi_long ret;
5602     int value;
5603     struct target_drm_i915_getparam *target_gparam;
5604 
5605     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5606         return -TARGET_EFAULT;
5607     }
5608 
5609     __get_user(gparam->param, &target_gparam->param);
5610     gparam->value = &value;
5611     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5612     put_user_s32(value, target_gparam->value);
5613 
5614     unlock_user_struct(target_gparam, arg, 0);
5615     return ret;
5616 }
5617 
5618 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5619                                   int fd, int cmd, abi_long arg)
5620 {
5621     switch (ie->host_cmd) {
5622     case DRM_IOCTL_I915_GETPARAM:
5623         return do_ioctl_drm_i915_getparam(ie,
5624                                           (struct drm_i915_getparam *)buf_temp,
5625                                           fd, arg);
5626     default:
5627         return -TARGET_ENOSYS;
5628     }
5629 }
5630 
5631 #endif
5632 
5633 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5634                                         int fd, int cmd, abi_long arg)
5635 {
5636     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5637     struct tun_filter *target_filter;
5638     char *target_addr;
5639 
5640     assert(ie->access == IOC_W);
5641 
5642     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5643     if (!target_filter) {
5644         return -TARGET_EFAULT;
5645     }
5646     filter->flags = tswap16(target_filter->flags);
5647     filter->count = tswap16(target_filter->count);
5648     unlock_user(target_filter, arg, 0);
5649 
5650     if (filter->count) {
5651         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5652             MAX_STRUCT_SIZE) {
5653             return -TARGET_EFAULT;
5654         }
5655 
5656         target_addr = lock_user(VERIFY_READ,
5657                                 arg + offsetof(struct tun_filter, addr),
5658                                 filter->count * ETH_ALEN, 1);
5659         if (!target_addr) {
5660             return -TARGET_EFAULT;
5661         }
5662         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5663         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5664     }
5665 
5666     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5667 }
5668 
5669 IOCTLEntry ioctl_entries[] = {
5670 #define IOCTL(cmd, access, ...) \
5671     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5672 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5673     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5674 #define IOCTL_IGNORE(cmd) \
5675     { TARGET_ ## cmd, 0, #cmd },
5676 #include "ioctls.h"
5677     { 0, 0, },
5678 };
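/*
 * ioctl_entries[] is generated from ioctls.h: plain IOCTL() entries are
 * handled generically by do_ioctl() below using their argtype description,
 * IOCTL_SPECIAL() entries name one of the do_ioctl_*() helpers above as a
 * custom converter, and IOCTL_IGNORE() entries are recognised but have no
 * host counterpart (host_cmd stays 0, so do_ioctl() returns -TARGET_ENOSYS).
 */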
5679 
5680 /* ??? Implement proper locking for ioctls.  */
5681 /* do_ioctl() must return target values and target errnos. */
5682 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5683 {
5684     const IOCTLEntry *ie;
5685     const argtype *arg_type;
5686     abi_long ret;
5687     uint8_t buf_temp[MAX_STRUCT_SIZE];
5688     int target_size;
5689     void *argptr;
5690 
5691     ie = ioctl_entries;
5692     for (;;) {
5693         if (ie->target_cmd == 0) {
5694             qemu_log_mask(
5695                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5696             return -TARGET_ENOSYS;
5697         }
5698         if (ie->target_cmd == cmd)
5699             break;
5700         ie++;
5701     }
5702     arg_type = ie->arg_type;
5703     if (ie->do_ioctl) {
5704         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5705     } else if (!ie->host_cmd) {
5706         /* Some architectures define BSD ioctls in their headers
5707            that are not implemented in Linux.  */
5708         return -TARGET_ENOSYS;
5709     }
5710 
5711     switch (arg_type[0]) {
5712     case TYPE_NULL:
5713         /* no argument */
5714         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5715         break;
5716     case TYPE_PTRVOID:
5717     case TYPE_INT:
5718     case TYPE_LONG:
5719     case TYPE_ULONG:
5720         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5721         break;
5722     case TYPE_PTR:
5723         arg_type++;
5724         target_size = thunk_type_size(arg_type, 0);
5725         switch (ie->access) {
5726         case IOC_R:
5727             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5728             if (!is_error(ret)) {
5729                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5730                 if (!argptr)
5731                     return -TARGET_EFAULT;
5732                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5733                 unlock_user(argptr, arg, target_size);
5734             }
5735             break;
5736         case IOC_W:
5737             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5738             if (!argptr)
5739                 return -TARGET_EFAULT;
5740             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5741             unlock_user(argptr, arg, 0);
5742             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5743             break;
5744         default:
5745         case IOC_RW:
5746             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5747             if (!argptr)
5748                 return -TARGET_EFAULT;
5749             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5750             unlock_user(argptr, arg, 0);
5751             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5752             if (!is_error(ret)) {
5753                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5754                 if (!argptr)
5755                     return -TARGET_EFAULT;
5756                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5757                 unlock_user(argptr, arg, target_size);
5758             }
5759             break;
5760         }
5761         break;
5762     default:
5763         qemu_log_mask(LOG_UNIMP,
5764                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5765                       (long)cmd, arg_type[0]);
5766         ret = -TARGET_ENOSYS;
5767         break;
5768     }
5769     return ret;
5770 }
5771 
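/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: target_to_host_bitmask() sets host_bits whenever the bits
 * selected by target_mask equal target_bits, and host_to_target_bitmask()
 * performs the reverse mapping.  Multi-bit fields such as CBAUD, CSIZE or
 * the delay fields therefore need one entry per possible value.
 */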
5772 static const bitmask_transtbl iflag_tbl[] = {
5773         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5774         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5775         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5776         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5777         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5778         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5779         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5780         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5781         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5782         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5783         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5784         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5785         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5786         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5787         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5788         { 0, 0, 0, 0 }
5789 };
5790 
5791 static const bitmask_transtbl oflag_tbl[] = {
5792 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5793 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5794 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5795 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5796 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5797 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5798 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5799 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5800 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5801 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5802 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5803 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5804 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5805 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5806 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5807 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5808 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5809 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5810 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5811 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5812 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5813 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5814 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5815 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5816 	{ 0, 0, 0, 0 }
5817 };
5818 
5819 static const bitmask_transtbl cflag_tbl[] = {
5820 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5821 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5822 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5823 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5824 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5825 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5826 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5827 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5828 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5829 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5830 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5831 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5832 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5833 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5834 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5835 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5836 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5837 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5838 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5839 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5840 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5841 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5842 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5843 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5844 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5845 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5846 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5847 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5848 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5849 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5850 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5851 	{ 0, 0, 0, 0 }
5852 };
5853 
5854 static const bitmask_transtbl lflag_tbl[] = {
5855   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5856   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5857   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5858   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5859   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5860   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5861   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5862   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5863   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5864   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5865   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5866   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5867   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5868   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5869   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5870   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5871   { 0, 0, 0, 0 }
5872 };
5873 
5874 static void target_to_host_termios (void *dst, const void *src)
5875 {
5876     struct host_termios *host = dst;
5877     const struct target_termios *target = src;
5878 
5879     host->c_iflag =
5880         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5881     host->c_oflag =
5882         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5883     host->c_cflag =
5884         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5885     host->c_lflag =
5886         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5887     host->c_line = target->c_line;
5888 
5889     memset(host->c_cc, 0, sizeof(host->c_cc));
5890     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5891     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5892     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5893     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5894     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5895     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5896     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5897     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5898     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5899     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5900     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5901     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5902     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5903     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5904     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5905     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5906     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5907 }
5908 
5909 static void host_to_target_termios (void *dst, const void *src)
5910 {
5911     struct target_termios *target = dst;
5912     const struct host_termios *host = src;
5913 
5914     target->c_iflag =
5915         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5916     target->c_oflag =
5917         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5918     target->c_cflag =
5919         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5920     target->c_lflag =
5921         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5922     target->c_line = host->c_line;
5923 
5924     memset(target->c_cc, 0, sizeof(target->c_cc));
5925     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5926     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5927     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5928     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5929     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5930     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5931     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5932     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5933     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5934     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5935     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5936     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5937     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5938     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5939     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5940     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5941     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5942 }
5943 
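/*
 * struct_termios_def supplies the hand-written converters for the termios
 * structure (declared with STRUCT_SPECIAL() in syscall_types.h), so the
 * generic thunk code uses these .convert hooks instead of a field list.
 */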
5944 static const StructEntry struct_termios_def = {
5945     .convert = { host_to_target_termios, target_to_host_termios },
5946     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5947     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5948     .print = print_termios,
5949 };
5950 
5951 static const bitmask_transtbl mmap_flags_tbl[] = {
5952     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5953     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5954     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5955     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5956       MAP_ANONYMOUS, MAP_ANONYMOUS },
5957     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5958       MAP_GROWSDOWN, MAP_GROWSDOWN },
5959     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5960       MAP_DENYWRITE, MAP_DENYWRITE },
5961     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5962       MAP_EXECUTABLE, MAP_EXECUTABLE },
5963     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5964     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5965       MAP_NORESERVE, MAP_NORESERVE },
5966     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5967     /* MAP_STACK has been ignored by the kernel for quite some time.
5968        Recognize it for the target insofar as we do not want to pass
5969        it through to the host.  */
5970     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5971     { 0, 0, 0, 0 }
5972 };
5973 
5974 /*
5975  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5976  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5977  */
5978 #if defined(TARGET_I386)
5979 
5980 /* NOTE: there is really one LDT for all the threads */
5981 static uint8_t *ldt_table;
5982 
5983 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5984 {
5985     int size;
5986     void *p;
5987 
5988     if (!ldt_table)
5989         return 0;
5990     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5991     if (size > bytecount)
5992         size = bytecount;
5993     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5994     if (!p)
5995         return -TARGET_EFAULT;
5996     /* ??? Should this be byteswapped?  */
5997     memcpy(p, ldt_table, size);
5998     unlock_user(p, ptr, size);
5999     return size;
6000 }
6001 
6002 /* XXX: add locking support */
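/*
 * write_ldt() mirrors the kernel's modify_ldt(): the target descriptor's
 * base, limit and flag bits are packed into the two 32-bit words of an
 * x86 segment descriptor (entry_1/entry_2 below) and stored into the
 * guest-visible ldt_table, which is lazily allocated on first use.
 */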
6003 static abi_long write_ldt(CPUX86State *env,
6004                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6005 {
6006     struct target_modify_ldt_ldt_s ldt_info;
6007     struct target_modify_ldt_ldt_s *target_ldt_info;
6008     int seg_32bit, contents, read_exec_only, limit_in_pages;
6009     int seg_not_present, useable, lm;
6010     uint32_t *lp, entry_1, entry_2;
6011 
6012     if (bytecount != sizeof(ldt_info))
6013         return -TARGET_EINVAL;
6014     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6015         return -TARGET_EFAULT;
6016     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6017     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6018     ldt_info.limit = tswap32(target_ldt_info->limit);
6019     ldt_info.flags = tswap32(target_ldt_info->flags);
6020     unlock_user_struct(target_ldt_info, ptr, 0);
6021 
6022     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6023         return -TARGET_EINVAL;
6024     seg_32bit = ldt_info.flags & 1;
6025     contents = (ldt_info.flags >> 1) & 3;
6026     read_exec_only = (ldt_info.flags >> 3) & 1;
6027     limit_in_pages = (ldt_info.flags >> 4) & 1;
6028     seg_not_present = (ldt_info.flags >> 5) & 1;
6029     useable = (ldt_info.flags >> 6) & 1;
6030 #ifdef TARGET_ABI32
6031     lm = 0;
6032 #else
6033     lm = (ldt_info.flags >> 7) & 1;
6034 #endif
6035     if (contents == 3) {
6036         if (oldmode)
6037             return -TARGET_EINVAL;
6038         if (seg_not_present == 0)
6039             return -TARGET_EINVAL;
6040     }
6041     /* allocate the LDT */
6042     if (!ldt_table) {
6043         env->ldt.base = target_mmap(0,
6044                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6045                                     PROT_READ|PROT_WRITE,
6046                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6047         if (env->ldt.base == -1)
6048             return -TARGET_ENOMEM;
6049         memset(g2h_untagged(env->ldt.base), 0,
6050                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6051         env->ldt.limit = 0xffff;
6052         ldt_table = g2h_untagged(env->ldt.base);
6053     }
6054 
6055     /* NOTE: same code as Linux kernel */
6056     /* Allow LDTs to be cleared by the user. */
6057     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6058         if (oldmode ||
6059             (contents == 0             &&
6060              read_exec_only == 1       &&
6061              seg_32bit == 0            &&
6062              limit_in_pages == 0       &&
6063              seg_not_present == 1      &&
6064              useable == 0)) {
6065             entry_1 = 0;
6066             entry_2 = 0;
6067             goto install;
6068         }
6069     }
6070 
6071     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6072         (ldt_info.limit & 0x0ffff);
6073     entry_2 = (ldt_info.base_addr & 0xff000000) |
6074         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6075         (ldt_info.limit & 0xf0000) |
6076         ((read_exec_only ^ 1) << 9) |
6077         (contents << 10) |
6078         ((seg_not_present ^ 1) << 15) |
6079         (seg_32bit << 22) |
6080         (limit_in_pages << 23) |
6081         (lm << 21) |
6082         0x7000;
6083     if (!oldmode)
6084         entry_2 |= (useable << 20);
6085 
6086     /* Install the new entry ...  */
6087 install:
6088     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6089     lp[0] = tswap32(entry_1);
6090     lp[1] = tswap32(entry_2);
6091     return 0;
6092 }
6093 
6094 /* specific and weird i386 syscalls */
6095 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6096                               unsigned long bytecount)
6097 {
6098     abi_long ret;
6099 
6100     switch (func) {
6101     case 0:
6102         ret = read_ldt(ptr, bytecount);
6103         break;
6104     case 1:
6105         ret = write_ldt(env, ptr, bytecount, 1);
6106         break;
6107     case 0x11:
6108         ret = write_ldt(env, ptr, bytecount, 0);
6109         break;
6110     default:
6111         ret = -TARGET_ENOSYS;
6112         break;
6113     }
6114     return ret;
6115 }
6116 
6117 #if defined(TARGET_ABI32)
6118 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6119 {
6120     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6121     struct target_modify_ldt_ldt_s ldt_info;
6122     struct target_modify_ldt_ldt_s *target_ldt_info;
6123     int seg_32bit, contents, read_exec_only, limit_in_pages;
6124     int seg_not_present, useable, lm;
6125     uint32_t *lp, entry_1, entry_2;
6126     int i;
6127 
6128     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6129     if (!target_ldt_info)
6130         return -TARGET_EFAULT;
6131     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6132     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6133     ldt_info.limit = tswap32(target_ldt_info->limit);
6134     ldt_info.flags = tswap32(target_ldt_info->flags);
6135     if (ldt_info.entry_number == -1) {
6136         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6137             if (gdt_table[i] == 0) {
6138                 ldt_info.entry_number = i;
6139                 target_ldt_info->entry_number = tswap32(i);
6140                 break;
6141             }
6142         }
6143     }
6144     unlock_user_struct(target_ldt_info, ptr, 1);
6145 
6146     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6147         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6148            return -TARGET_EINVAL;
6149     seg_32bit = ldt_info.flags & 1;
6150     contents = (ldt_info.flags >> 1) & 3;
6151     read_exec_only = (ldt_info.flags >> 3) & 1;
6152     limit_in_pages = (ldt_info.flags >> 4) & 1;
6153     seg_not_present = (ldt_info.flags >> 5) & 1;
6154     useable = (ldt_info.flags >> 6) & 1;
6155 #ifdef TARGET_ABI32
6156     lm = 0;
6157 #else
6158     lm = (ldt_info.flags >> 7) & 1;
6159 #endif
6160 
6161     if (contents == 3) {
6162         if (seg_not_present == 0)
6163             return -TARGET_EINVAL;
6164     }
6165 
6166     /* NOTE: same code as Linux kernel */
6167     /* Allow LDTs to be cleared by the user. */
6168     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6169         if ((contents == 0             &&
6170              read_exec_only == 1       &&
6171              seg_32bit == 0            &&
6172              limit_in_pages == 0       &&
6173              seg_not_present == 1      &&
6174              useable == 0 )) {
6175             entry_1 = 0;
6176             entry_2 = 0;
6177             goto install;
6178         }
6179     }
6180 
6181     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6182         (ldt_info.limit & 0x0ffff);
6183     entry_2 = (ldt_info.base_addr & 0xff000000) |
6184         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6185         (ldt_info.limit & 0xf0000) |
6186         ((read_exec_only ^ 1) << 9) |
6187         (contents << 10) |
6188         ((seg_not_present ^ 1) << 15) |
6189         (seg_32bit << 22) |
6190         (limit_in_pages << 23) |
6191         (useable << 20) |
6192         (lm << 21) |
6193         0x7000;
6194 
6195     /* Install the new entry ...  */
6196 install:
6197     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6198     lp[0] = tswap32(entry_1);
6199     lp[1] = tswap32(entry_2);
6200     return 0;
6201 }
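/*
 * do_set_thread_area() mirrors the kernel's set_thread_area(): the guest
 * hands us a user_desc-style record, and entry_number == -1 means "pick the
 * first free TLS slot between TARGET_GDT_ENTRY_TLS_MIN and
 * TARGET_GDT_ENTRY_TLS_MAX and write the chosen index back".  Roughly what
 * a 32-bit guest libc does when setting up thread-local storage (guest-side
 * sketch only, the field values are illustrative):
 *
 *     struct user_desc d = {
 *         .entry_number = -1,                // ask for a free TLS slot
 *         .base_addr = (unsigned long)tcb,   // thread control block
 *         .limit = 0xfffff,
 *         .seg_32bit = 1, .limit_in_pages = 1, .useable = 1,
 *     };
 *     syscall(SYS_set_thread_area, &d);      // d.entry_number is filled in
 */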
6202 
6203 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6204 {
6205     struct target_modify_ldt_ldt_s *target_ldt_info;
6206     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6207     uint32_t base_addr, limit, flags;
6208     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6209     int seg_not_present, useable, lm;
6210     uint32_t *lp, entry_1, entry_2;
6211 
6212     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6213     if (!target_ldt_info)
6214         return -TARGET_EFAULT;
6215     idx = tswap32(target_ldt_info->entry_number);
6216     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6217         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6218         unlock_user_struct(target_ldt_info, ptr, 1);
6219         return -TARGET_EINVAL;
6220     }
6221     lp = (uint32_t *)(gdt_table + idx);
6222     entry_1 = tswap32(lp[0]);
6223     entry_2 = tswap32(lp[1]);
6224 
6225     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6226     contents = (entry_2 >> 10) & 3;
6227     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6228     seg_32bit = (entry_2 >> 22) & 1;
6229     limit_in_pages = (entry_2 >> 23) & 1;
6230     useable = (entry_2 >> 20) & 1;
6231 #ifdef TARGET_ABI32
6232     lm = 0;
6233 #else
6234     lm = (entry_2 >> 21) & 1;
6235 #endif
6236     flags = (seg_32bit << 0) | (contents << 1) |
6237         (read_exec_only << 3) | (limit_in_pages << 4) |
6238         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6239     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6240     base_addr = (entry_1 >> 16) |
6241         (entry_2 & 0xff000000) |
6242         ((entry_2 & 0xff) << 16);
6243     target_ldt_info->base_addr = tswapal(base_addr);
6244     target_ldt_info->limit = tswap32(limit);
6245     target_ldt_info->flags = tswap32(flags);
6246     unlock_user_struct(target_ldt_info, ptr, 1);
6247     return 0;
6248 }
6249 
6250 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6251 {
6252     return -TARGET_ENOSYS;
6253 }
6254 #else
6255 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6256 {
6257     abi_long ret = 0;
6258     abi_ulong val;
6259     int idx;
6260 
6261     switch(code) {
6262     case TARGET_ARCH_SET_GS:
6263     case TARGET_ARCH_SET_FS:
6264         if (code == TARGET_ARCH_SET_GS)
6265             idx = R_GS;
6266         else
6267             idx = R_FS;
6268         cpu_x86_load_seg(env, idx, 0);
6269         env->segs[idx].base = addr;
6270         break;
6271     case TARGET_ARCH_GET_GS:
6272     case TARGET_ARCH_GET_FS:
6273         if (code == TARGET_ARCH_GET_GS)
6274             idx = R_GS;
6275         else
6276             idx = R_FS;
6277         val = env->segs[idx].base;
6278         if (put_user(val, addr, abi_ulong))
6279             ret = -TARGET_EFAULT;
6280         break;
6281     default:
6282         ret = -TARGET_EINVAL;
6283         break;
6284     }
6285     return ret;
6286 }
6287 #endif /* defined(TARGET_ABI32) */
6288 
6289 #endif /* defined(TARGET_I386) */
6290 
6291 #define NEW_STACK_SIZE 0x40000
6292 
6293 
6294 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6295 typedef struct {
6296     CPUArchState *env;
6297     pthread_mutex_t mutex;
6298     pthread_cond_t cond;
6299     pthread_t thread;
6300     uint32_t tid;
6301     abi_ulong child_tidptr;
6302     abi_ulong parent_tidptr;
6303     sigset_t sigmask;
6304 } new_thread_info;
6305 
6306 static void *clone_func(void *arg)
6307 {
6308     new_thread_info *info = arg;
6309     CPUArchState *env;
6310     CPUState *cpu;
6311     TaskState *ts;
6312 
6313     rcu_register_thread();
6314     tcg_register_thread();
6315     env = info->env;
6316     cpu = env_cpu(env);
6317     thread_cpu = cpu;
6318     ts = (TaskState *)cpu->opaque;
6319     info->tid = sys_gettid();
6320     task_settid(ts);
6321     if (info->child_tidptr)
6322         put_user_u32(info->tid, info->child_tidptr);
6323     if (info->parent_tidptr)
6324         put_user_u32(info->tid, info->parent_tidptr);
6325     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6326     /* Enable signals.  */
6327     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6328     /* Signal to the parent that we're ready.  */
6329     pthread_mutex_lock(&info->mutex);
6330     pthread_cond_broadcast(&info->cond);
6331     pthread_mutex_unlock(&info->mutex);
6332     /* Wait until the parent has finished initializing the tls state.  */
6333     pthread_mutex_lock(&clone_lock);
6334     pthread_mutex_unlock(&clone_lock);
6335     cpu_loop(env);
6336     /* never exits */
6337     return NULL;
6338 }
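/*
 * Start-up handshake: clone_func() runs in the newly created host thread.
 * It registers the thread with RCU and TCG, publishes its TID through the
 * child/parent tidptr addresses, unblocks signals, and broadcasts
 * info->cond so that do_fork() can return the TID to the guest.  It then
 * takes and immediately releases clone_lock, which the parent holds for the
 * whole setup phase, so the child cannot enter cpu_loop() before things
 * like the TLS state have been initialized.  cpu_loop() never returns.
 */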
6339 
6340 /* do_fork() must return host values and target errnos (unlike most
6341    do_*() functions). */
6342 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6343                    abi_ulong parent_tidptr, target_ulong newtls,
6344                    abi_ulong child_tidptr)
6345 {
6346     CPUState *cpu = env_cpu(env);
6347     int ret;
6348     TaskState *ts;
6349     CPUState *new_cpu;
6350     CPUArchState *new_env;
6351     sigset_t sigmask;
6352 
6353     flags &= ~CLONE_IGNORED_FLAGS;
6354 
6355     /* Emulate vfork() with fork() */
6356     if (flags & CLONE_VFORK)
6357         flags &= ~(CLONE_VFORK | CLONE_VM);
6358 
6359     if (flags & CLONE_VM) {
6360         TaskState *parent_ts = (TaskState *)cpu->opaque;
6361         new_thread_info info;
6362         pthread_attr_t attr;
6363 
6364         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6365             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6366             return -TARGET_EINVAL;
6367         }
6368 
6369         ts = g_new0(TaskState, 1);
6370         init_task_state(ts);
6371 
6372         /* Grab a mutex so that thread setup appears atomic.  */
6373         pthread_mutex_lock(&clone_lock);
6374 
6375         /*
6376          * If this is our first additional thread, we need to ensure we
6377          * generate code for parallel execution and flush old translations.
6378          * Do this now so that the copy gets CF_PARALLEL too.
6379          */
6380         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6381             cpu->tcg_cflags |= CF_PARALLEL;
6382             tb_flush(cpu);
6383         }
6384 
6385         /* we create a new CPU instance. */
6386         new_env = cpu_copy(env);
6387         /* Init regs that differ from the parent.  */
6388         cpu_clone_regs_child(new_env, newsp, flags);
6389         cpu_clone_regs_parent(env, flags);
6390         new_cpu = env_cpu(new_env);
6391         new_cpu->opaque = ts;
6392         ts->bprm = parent_ts->bprm;
6393         ts->info = parent_ts->info;
6394         ts->signal_mask = parent_ts->signal_mask;
6395 
6396         if (flags & CLONE_CHILD_CLEARTID) {
6397             ts->child_tidptr = child_tidptr;
6398         }
6399 
6400         if (flags & CLONE_SETTLS) {
6401             cpu_set_tls (new_env, newtls);
6402         }
6403 
6404         memset(&info, 0, sizeof(info));
6405         pthread_mutex_init(&info.mutex, NULL);
6406         pthread_mutex_lock(&info.mutex);
6407         pthread_cond_init(&info.cond, NULL);
6408         info.env = new_env;
6409         if (flags & CLONE_CHILD_SETTID) {
6410             info.child_tidptr = child_tidptr;
6411         }
6412         if (flags & CLONE_PARENT_SETTID) {
6413             info.parent_tidptr = parent_tidptr;
6414         }
6415 
6416         ret = pthread_attr_init(&attr);
6417         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6418         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6419         /* It is not safe to deliver signals until the child has finished
6420            initializing, so temporarily block all signals.  */
6421         sigfillset(&sigmask);
6422         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6423         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6424 
6425         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6426         /* TODO: Free new CPU state if thread creation failed.  */
6427 
6428         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6429         pthread_attr_destroy(&attr);
6430         if (ret == 0) {
6431             /* Wait for the child to initialize.  */
6432             pthread_cond_wait(&info.cond, &info.mutex);
6433             ret = info.tid;
6434         } else {
6435             ret = -1;
6436         }
6437         pthread_mutex_unlock(&info.mutex);
6438         pthread_cond_destroy(&info.cond);
6439         pthread_mutex_destroy(&info.mutex);
6440         pthread_mutex_unlock(&clone_lock);
6441     } else {
6442         /* if there is no CLONE_VM, we consider it to be a fork */
6443         if (flags & CLONE_INVALID_FORK_FLAGS) {
6444             return -TARGET_EINVAL;
6445         }
6446 
6447         /* We can't support custom termination signals */
6448         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6449             return -TARGET_EINVAL;
6450         }
6451 
6452         if (block_signals()) {
6453             return -TARGET_ERESTARTSYS;
6454         }
6455 
6456         fork_start();
6457         ret = fork();
6458         if (ret == 0) {
6459             /* Child Process.  */
6460             cpu_clone_regs_child(env, newsp, flags);
6461             fork_end(1);
6462             /* There is a race condition here.  The parent process could
6463                theoretically read the TID in the child process before the child
6464                tid is set.  This would require using either ptrace
6465                (not implemented) or having *_tidptr point at a shared memory
6466                mapping.  We can't repeat the spinlock hack used above because
6467                the child process gets its own copy of the lock.  */
6468             if (flags & CLONE_CHILD_SETTID)
6469                 put_user_u32(sys_gettid(), child_tidptr);
6470             if (flags & CLONE_PARENT_SETTID)
6471                 put_user_u32(sys_gettid(), parent_tidptr);
6472             ts = (TaskState *)cpu->opaque;
6473             if (flags & CLONE_SETTLS)
6474                 cpu_set_tls (env, newtls);
6475             if (flags & CLONE_CHILD_CLEARTID)
6476                 ts->child_tidptr = child_tidptr;
6477         } else {
6478             cpu_clone_regs_parent(env, flags);
6479             fork_end(0);
6480         }
6481     }
6482     return ret;
6483 }
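/*
 * In short, do_fork() has two paths.  With CLONE_VM the "child" becomes a
 * new host pthread sharing the guest address space: the CPU state is
 * copied, a fresh TaskState is attached, signals are blocked around
 * pthread_create(), and the parent waits on info.cond until clone_func()
 * has reported its TID.  Without CLONE_VM we perform a real host fork(),
 * which is also how vfork() is emulated (CLONE_VFORK|CLONE_VM are stripped
 * above), and only SIGCHLD is accepted as the termination signal because
 * the host fork() will always notify the parent with SIGCHLD.
 */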
6484 
6485 /* Warning: doesn't handle Linux-specific flags... */
6486 static int target_to_host_fcntl_cmd(int cmd)
6487 {
6488     int ret;
6489 
6490     switch(cmd) {
6491     case TARGET_F_DUPFD:
6492     case TARGET_F_GETFD:
6493     case TARGET_F_SETFD:
6494     case TARGET_F_GETFL:
6495     case TARGET_F_SETFL:
6496     case TARGET_F_OFD_GETLK:
6497     case TARGET_F_OFD_SETLK:
6498     case TARGET_F_OFD_SETLKW:
6499         ret = cmd;
6500         break;
6501     case TARGET_F_GETLK:
6502         ret = F_GETLK64;
6503         break;
6504     case TARGET_F_SETLK:
6505         ret = F_SETLK64;
6506         break;
6507     case TARGET_F_SETLKW:
6508         ret = F_SETLKW64;
6509         break;
6510     case TARGET_F_GETOWN:
6511         ret = F_GETOWN;
6512         break;
6513     case TARGET_F_SETOWN:
6514         ret = F_SETOWN;
6515         break;
6516     case TARGET_F_GETSIG:
6517         ret = F_GETSIG;
6518         break;
6519     case TARGET_F_SETSIG:
6520         ret = F_SETSIG;
6521         break;
6522 #if TARGET_ABI_BITS == 32
6523     case TARGET_F_GETLK64:
6524         ret = F_GETLK64;
6525         break;
6526     case TARGET_F_SETLK64:
6527         ret = F_SETLK64;
6528         break;
6529     case TARGET_F_SETLKW64:
6530         ret = F_SETLKW64;
6531         break;
6532 #endif
6533     case TARGET_F_SETLEASE:
6534         ret = F_SETLEASE;
6535         break;
6536     case TARGET_F_GETLEASE:
6537         ret = F_GETLEASE;
6538         break;
6539 #ifdef F_DUPFD_CLOEXEC
6540     case TARGET_F_DUPFD_CLOEXEC:
6541         ret = F_DUPFD_CLOEXEC;
6542         break;
6543 #endif
6544     case TARGET_F_NOTIFY:
6545         ret = F_NOTIFY;
6546         break;
6547 #ifdef F_GETOWN_EX
6548     case TARGET_F_GETOWN_EX:
6549         ret = F_GETOWN_EX;
6550         break;
6551 #endif
6552 #ifdef F_SETOWN_EX
6553     case TARGET_F_SETOWN_EX:
6554         ret = F_SETOWN_EX;
6555         break;
6556 #endif
6557 #ifdef F_SETPIPE_SZ
6558     case TARGET_F_SETPIPE_SZ:
6559         ret = F_SETPIPE_SZ;
6560         break;
6561     case TARGET_F_GETPIPE_SZ:
6562         ret = F_GETPIPE_SZ;
6563         break;
6564 #endif
6565 #ifdef F_ADD_SEALS
6566     case TARGET_F_ADD_SEALS:
6567         ret = F_ADD_SEALS;
6568         break;
6569     case TARGET_F_GET_SEALS:
6570         ret = F_GET_SEALS;
6571         break;
6572 #endif
6573     default:
6574         ret = -TARGET_EINVAL;
6575         break;
6576     }
6577 
6578 #if defined(__powerpc64__)
6579     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13 and
6580      * 14, which are not supported by the kernel. The glibc fcntl wrapper
6581      * adjusts them to 5, 6 and 7 before making the syscall. Since we make the
6582      * syscall directly, adjust to what the kernel supports.
6583      */
6584     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6585         ret -= F_GETLK64 - 5;
6586     }
6587 #endif
6588 
6589     return ret;
6590 }
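/*
 * Mapping notes: most commands translate one-to-one, but the guest's plain
 * F_GETLK/F_SETLK/F_SETLKW are mapped to the host F_*LK64 variants so that
 * large offsets survive the conversion through struct flock64, and on
 * 32-bit ABIs the guest's explicit *64 commands collapse onto the same host
 * values.  The PPC64 adjustment at the end of the function is only needed
 * because we bypass the glibc fcntl() wrapper and issue the syscall
 * ourselves.
 */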
6591 
6592 #define FLOCK_TRANSTBL \
6593     switch (type) { \
6594     TRANSTBL_CONVERT(F_RDLCK); \
6595     TRANSTBL_CONVERT(F_WRLCK); \
6596     TRANSTBL_CONVERT(F_UNLCK); \
6597     }
6598 
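/*
 * FLOCK_TRANSTBL is expanded twice with different definitions of
 * TRANSTBL_CONVERT, once per direction, so the list of lock types is
 * written only once.  target_to_host_flock(), for example, expands to
 * roughly:
 *
 *     switch (type) {
 *     case TARGET_F_RDLCK: return F_RDLCK;
 *     case TARGET_F_WRLCK: return F_WRLCK;
 *     case TARGET_F_UNLCK: return F_UNLCK;
 *     }
 *     return -TARGET_EINVAL;
 */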
6599 static int target_to_host_flock(int type)
6600 {
6601 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6602     FLOCK_TRANSTBL
6603 #undef  TRANSTBL_CONVERT
6604     return -TARGET_EINVAL;
6605 }
6606 
6607 static int host_to_target_flock(int type)
6608 {
6609 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6610     FLOCK_TRANSTBL
6611 #undef  TRANSTBL_CONVERT
6612     /* if we don't know how to convert the value coming
6613      * from the host, we copy it to the target field as-is
6614      */
6615     return type;
6616 }
6617 
6618 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6619                                             abi_ulong target_flock_addr)
6620 {
6621     struct target_flock *target_fl;
6622     int l_type;
6623 
6624     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6625         return -TARGET_EFAULT;
6626     }
6627 
6628     __get_user(l_type, &target_fl->l_type);
6629     l_type = target_to_host_flock(l_type);
6630     if (l_type < 0) {
6631         return l_type;
6632     }
6633     fl->l_type = l_type;
6634     __get_user(fl->l_whence, &target_fl->l_whence);
6635     __get_user(fl->l_start, &target_fl->l_start);
6636     __get_user(fl->l_len, &target_fl->l_len);
6637     __get_user(fl->l_pid, &target_fl->l_pid);
6638     unlock_user_struct(target_fl, target_flock_addr, 0);
6639     return 0;
6640 }
6641 
6642 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6643                                           const struct flock64 *fl)
6644 {
6645     struct target_flock *target_fl;
6646     short l_type;
6647 
6648     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6649         return -TARGET_EFAULT;
6650     }
6651 
6652     l_type = host_to_target_flock(fl->l_type);
6653     __put_user(l_type, &target_fl->l_type);
6654     __put_user(fl->l_whence, &target_fl->l_whence);
6655     __put_user(fl->l_start, &target_fl->l_start);
6656     __put_user(fl->l_len, &target_fl->l_len);
6657     __put_user(fl->l_pid, &target_fl->l_pid);
6658     unlock_user_struct(target_fl, target_flock_addr, 1);
6659     return 0;
6660 }
6661 
6662 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6663 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6664 
6665 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6666 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6667                                                    abi_ulong target_flock_addr)
6668 {
6669     struct target_oabi_flock64 *target_fl;
6670     int l_type;
6671 
6672     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6673         return -TARGET_EFAULT;
6674     }
6675 
6676     __get_user(l_type, &target_fl->l_type);
6677     l_type = target_to_host_flock(l_type);
6678     if (l_type < 0) {
6679         return l_type;
6680     }
6681     fl->l_type = l_type;
6682     __get_user(fl->l_whence, &target_fl->l_whence);
6683     __get_user(fl->l_start, &target_fl->l_start);
6684     __get_user(fl->l_len, &target_fl->l_len);
6685     __get_user(fl->l_pid, &target_fl->l_pid);
6686     unlock_user_struct(target_fl, target_flock_addr, 0);
6687     return 0;
6688 }
6689 
6690 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6691                                                  const struct flock64 *fl)
6692 {
6693     struct target_oabi_flock64 *target_fl;
6694     short l_type;
6695 
6696     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6697         return -TARGET_EFAULT;
6698     }
6699 
6700     l_type = host_to_target_flock(fl->l_type);
6701     __put_user(l_type, &target_fl->l_type);
6702     __put_user(fl->l_whence, &target_fl->l_whence);
6703     __put_user(fl->l_start, &target_fl->l_start);
6704     __put_user(fl->l_len, &target_fl->l_len);
6705     __put_user(fl->l_pid, &target_fl->l_pid);
6706     unlock_user_struct(target_fl, target_flock_addr, 1);
6707     return 0;
6708 }
6709 #endif
6710 
6711 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6712                                               abi_ulong target_flock_addr)
6713 {
6714     struct target_flock64 *target_fl;
6715     int l_type;
6716 
6717     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6718         return -TARGET_EFAULT;
6719     }
6720 
6721     __get_user(l_type, &target_fl->l_type);
6722     l_type = target_to_host_flock(l_type);
6723     if (l_type < 0) {
6724         return l_type;
6725     }
6726     fl->l_type = l_type;
6727     __get_user(fl->l_whence, &target_fl->l_whence);
6728     __get_user(fl->l_start, &target_fl->l_start);
6729     __get_user(fl->l_len, &target_fl->l_len);
6730     __get_user(fl->l_pid, &target_fl->l_pid);
6731     unlock_user_struct(target_fl, target_flock_addr, 0);
6732     return 0;
6733 }
6734 
6735 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6736                                             const struct flock64 *fl)
6737 {
6738     struct target_flock64 *target_fl;
6739     short l_type;
6740 
6741     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6742         return -TARGET_EFAULT;
6743     }
6744 
6745     l_type = host_to_target_flock(fl->l_type);
6746     __put_user(l_type, &target_fl->l_type);
6747     __put_user(fl->l_whence, &target_fl->l_whence);
6748     __put_user(fl->l_start, &target_fl->l_start);
6749     __put_user(fl->l_len, &target_fl->l_len);
6750     __put_user(fl->l_pid, &target_fl->l_pid);
6751     unlock_user_struct(target_fl, target_flock_addr, 1);
6752     return 0;
6753 }
6754 
6755 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6756 {
6757     struct flock64 fl64;
6758 #ifdef F_GETOWN_EX
6759     struct f_owner_ex fox;
6760     struct target_f_owner_ex *target_fox;
6761 #endif
6762     abi_long ret;
6763     int host_cmd = target_to_host_fcntl_cmd(cmd);
6764 
6765     if (host_cmd == -TARGET_EINVAL)
6766         return host_cmd;
6767 
6768     switch(cmd) {
6769     case TARGET_F_GETLK:
6770         ret = copy_from_user_flock(&fl64, arg);
6771         if (ret) {
6772             return ret;
6773         }
6774         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6775         if (ret == 0) {
6776             ret = copy_to_user_flock(arg, &fl64);
6777         }
6778         break;
6779 
6780     case TARGET_F_SETLK:
6781     case TARGET_F_SETLKW:
6782         ret = copy_from_user_flock(&fl64, arg);
6783         if (ret) {
6784             return ret;
6785         }
6786         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6787         break;
6788 
6789     case TARGET_F_GETLK64:
6790     case TARGET_F_OFD_GETLK:
6791         ret = copy_from_user_flock64(&fl64, arg);
6792         if (ret) {
6793             return ret;
6794         }
6795         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6796         if (ret == 0) {
6797             ret = copy_to_user_flock64(arg, &fl64);
6798         }
6799         break;
6800     case TARGET_F_SETLK64:
6801     case TARGET_F_SETLKW64:
6802     case TARGET_F_OFD_SETLK:
6803     case TARGET_F_OFD_SETLKW:
6804         ret = copy_from_user_flock64(&fl64, arg);
6805         if (ret) {
6806             return ret;
6807         }
6808         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6809         break;
6810 
6811     case TARGET_F_GETFL:
6812         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6813         if (ret >= 0) {
6814             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6815         }
6816         break;
6817 
6818     case TARGET_F_SETFL:
6819         ret = get_errno(safe_fcntl(fd, host_cmd,
6820                                    target_to_host_bitmask(arg,
6821                                                           fcntl_flags_tbl)));
6822         break;
6823 
6824 #ifdef F_GETOWN_EX
6825     case TARGET_F_GETOWN_EX:
6826         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6827         if (ret >= 0) {
6828             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6829                 return -TARGET_EFAULT;
6830             target_fox->type = tswap32(fox.type);
6831             target_fox->pid = tswap32(fox.pid);
6832             unlock_user_struct(target_fox, arg, 1);
6833         }
6834         break;
6835 #endif
6836 
6837 #ifdef F_SETOWN_EX
6838     case TARGET_F_SETOWN_EX:
6839         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6840             return -TARGET_EFAULT;
6841         fox.type = tswap32(target_fox->type);
6842         fox.pid = tswap32(target_fox->pid);
6843         unlock_user_struct(target_fox, arg, 0);
6844         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6845         break;
6846 #endif
6847 
6848     case TARGET_F_SETSIG:
6849         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6850         break;
6851 
6852     case TARGET_F_GETSIG:
6853         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6854         break;
6855 
6856     case TARGET_F_SETOWN:
6857     case TARGET_F_GETOWN:
6858     case TARGET_F_SETLEASE:
6859     case TARGET_F_GETLEASE:
6860     case TARGET_F_SETPIPE_SZ:
6861     case TARGET_F_GETPIPE_SZ:
6862     case TARGET_F_ADD_SEALS:
6863     case TARGET_F_GET_SEALS:
6864         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6865         break;
6866 
6867     default:
6868         ret = get_errno(safe_fcntl(fd, cmd, arg));
6869         break;
6870     }
6871     return ret;
6872 }
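/*
 * do_fcntl() is mostly a marshalling layer: the command goes through
 * target_to_host_fcntl_cmd(), record locks are converted via struct flock64
 * in both directions, F_GETFL/F_SETFL flag words go through the
 * fcntl_flags_tbl bitmask table, and F_GETSIG/F_SETSIG translate signal
 * numbers.  An illustrative guest-side call such as
 *
 *     struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *     fcntl(fd, F_SETLKW, &fl);
 *
 * ends up in the TARGET_F_SETLKW case above and is issued to the host as
 * F_SETLKW64 on the converted flock64.
 */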
6873 
6874 #ifdef USE_UID16
6875 
6876 static inline int high2lowuid(int uid)
6877 {
6878     if (uid > 65535)
6879         return 65534;
6880     else
6881         return uid;
6882 }
6883 
6884 static inline int high2lowgid(int gid)
6885 {
6886     if (gid > 65535)
6887         return 65534;
6888     else
6889         return gid;
6890 }
6891 
6892 static inline int low2highuid(int uid)
6893 {
6894     if ((int16_t)uid == -1)
6895         return -1;
6896     else
6897         return uid;
6898 }
6899 
6900 static inline int low2highgid(int gid)
6901 {
6902     if ((int16_t)gid == -1)
6903         return -1;
6904     else
6905         return gid;
6906 }
6907 static inline int tswapid(int id)
6908 {
6909     return tswap16(id);
6910 }
6911 
6912 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6913 
6914 #else /* !USE_UID16 */
6915 static inline int high2lowuid(int uid)
6916 {
6917     return uid;
6918 }
6919 static inline int high2lowgid(int gid)
6920 {
6921     return gid;
6922 }
6923 static inline int low2highuid(int uid)
6924 {
6925     return uid;
6926 }
6927 static inline int low2highgid(int gid)
6928 {
6929     return gid;
6930 }
6931 static inline int tswapid(int id)
6932 {
6933     return tswap32(id);
6934 }
6935 
6936 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6937 
6938 #endif /* USE_UID16 */
6939 
6940 /* We must do direct syscalls for setting UID/GID, because we want to
6941  * implement the Linux system call semantics of "change only for this thread",
6942  * not the libc/POSIX semantics of "change for all threads in process".
6943  * (See http://ewontfix.com/17/ for more details.)
6944  * We use the 32-bit version of the syscalls if present; if it is not
6945  * then either the host architecture supports 32-bit UIDs natively with
6946  * the standard syscall, or the 16-bit UID is the best we can do.
6947  */
6948 #ifdef __NR_setuid32
6949 #define __NR_sys_setuid __NR_setuid32
6950 #else
6951 #define __NR_sys_setuid __NR_setuid
6952 #endif
6953 #ifdef __NR_setgid32
6954 #define __NR_sys_setgid __NR_setgid32
6955 #else
6956 #define __NR_sys_setgid __NR_setgid
6957 #endif
6958 #ifdef __NR_setresuid32
6959 #define __NR_sys_setresuid __NR_setresuid32
6960 #else
6961 #define __NR_sys_setresuid __NR_setresuid
6962 #endif
6963 #ifdef __NR_setresgid32
6964 #define __NR_sys_setresgid __NR_setresgid32
6965 #else
6966 #define __NR_sys_setresgid __NR_setresgid
6967 #endif
6968 
6969 _syscall1(int, sys_setuid, uid_t, uid)
6970 _syscall1(int, sys_setgid, gid_t, gid)
6971 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6972 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
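/*
 * The wrappers above issue the raw set*id syscalls so that only the calling
 * thread's credentials change, which is what the guest-visible kernel ABI
 * promises; calling the libc functions instead would broadcast the change
 * to every thread in this process.  Illustrative contrast in a
 * multi-threaded guest:
 *
 *     setuid(uid);       // libc wrapper: every thread drops to uid
 *     sys_setuid(uid);   // raw syscall: only the current thread changes
 */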
6973 
6974 void syscall_init(void)
6975 {
6976     IOCTLEntry *ie;
6977     const argtype *arg_type;
6978     int size;
6979 
6980     thunk_init(STRUCT_MAX);
6981 
6982 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6983 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6984 #include "syscall_types.h"
6985 #undef STRUCT
6986 #undef STRUCT_SPECIAL
6987 
6988     /* We patch the ioctl size if necessary. We rely on the fact that
6989        no ioctl has all the bits set to '1' in its size field. */
6990     ie = ioctl_entries;
6991     while (ie->target_cmd != 0) {
6992         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6993             TARGET_IOC_SIZEMASK) {
6994             arg_type = ie->arg_type;
6995             if (arg_type[0] != TYPE_PTR) {
6996                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6997                         ie->target_cmd);
6998                 exit(1);
6999             }
7000             arg_type++;
7001             size = thunk_type_size(arg_type, 0);
7002             ie->target_cmd = (ie->target_cmd &
7003                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7004                 (size << TARGET_IOC_SIZESHIFT);
7005         }
7006 
7007         /* automatic consistency check if same arch */
7008 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7009     (defined(__x86_64__) && defined(TARGET_X86_64))
7010         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7011             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7012                     ie->name, ie->target_cmd, ie->host_cmd);
7013         }
7014 #endif
7015         ie++;
7016     }
7017 }
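/*
 * ioctl table fix-up, in brief: entries whose size field is all ones
 * (TARGET_IOC_SIZEMASK) could not know their argument size when the table
 * was built, so syscall_init() recomputes it from the thunk type behind the
 * pointer argument and patches the size bits of target_cmd in place; an
 * entry describing a pointer to a 16-byte structure, for instance, ends up
 * with 16 in the TARGET_IOC_SIZESHIFT field.  When host and target are the
 * same architecture the patched value must match the host ioctl number,
 * which is what the consistency check above warns about.
 */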
7018 
7019 #ifdef TARGET_NR_truncate64
7020 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7021                                          abi_long arg2,
7022                                          abi_long arg3,
7023                                          abi_long arg4)
7024 {
7025     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7026         arg2 = arg3;
7027         arg3 = arg4;
7028     }
7029     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7030 }
7031 #endif
7032 
7033 #ifdef TARGET_NR_ftruncate64
7034 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7035                                           abi_long arg2,
7036                                           abi_long arg3,
7037                                           abi_long arg4)
7038 {
7039     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7040         arg2 = arg3;
7041         arg3 = arg4;
7042     }
7043     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7044 }
7045 #endif
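/*
 * About the regpairs_aligned() shuffling above: some 32-bit guest ABIs
 * (ARM EABI, for example) require 64-bit syscall arguments to start in an
 * even-numbered register, so a padding argument is inserted and the two
 * halves of the offset arrive one slot later than the prototype suggests.
 * In that case we shift arg2/arg3 down before recombining them with
 * target_offset64().  Illustratively, a guest ftruncate64(fd, off) then
 * reaches us as (fd, pad, off_lo, off_hi) or (fd, pad, off_hi, off_lo)
 * depending on the guest's endianness, instead of (fd, off_lo, off_hi).
 */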
7046 
7047 #if defined(TARGET_NR_timer_settime) || \
7048     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7049 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7050                                                  abi_ulong target_addr)
7051 {
7052     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7053                                 offsetof(struct target_itimerspec,
7054                                          it_interval)) ||
7055         target_to_host_timespec(&host_its->it_value, target_addr +
7056                                 offsetof(struct target_itimerspec,
7057                                          it_value))) {
7058         return -TARGET_EFAULT;
7059     }
7060 
7061     return 0;
7062 }
7063 #endif
7064 
7065 #if defined(TARGET_NR_timer_settime64) || \
7066     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7067 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7068                                                    abi_ulong target_addr)
7069 {
7070     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7071                                   offsetof(struct target__kernel_itimerspec,
7072                                            it_interval)) ||
7073         target_to_host_timespec64(&host_its->it_value, target_addr +
7074                                   offsetof(struct target__kernel_itimerspec,
7075                                            it_value))) {
7076         return -TARGET_EFAULT;
7077     }
7078 
7079     return 0;
7080 }
7081 #endif
7082 
7083 #if ((defined(TARGET_NR_timerfd_gettime) || \
7084       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7085       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7086 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7087                                                  struct itimerspec *host_its)
7088 {
7089     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7090                                                        it_interval),
7091                                 &host_its->it_interval) ||
7092         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7093                                                        it_value),
7094                                 &host_its->it_value)) {
7095         return -TARGET_EFAULT;
7096     }
7097     return 0;
7098 }
7099 #endif
7100 
7101 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7102       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7103       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7104 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7105                                                    struct itimerspec *host_its)
7106 {
7107     if (host_to_target_timespec64(target_addr +
7108                                   offsetof(struct target__kernel_itimerspec,
7109                                            it_interval),
7110                                   &host_its->it_interval) ||
7111         host_to_target_timespec64(target_addr +
7112                                   offsetof(struct target__kernel_itimerspec,
7113                                            it_value),
7114                                   &host_its->it_value)) {
7115         return -TARGET_EFAULT;
7116     }
7117     return 0;
7118 }
7119 #endif
7120 
7121 #if defined(TARGET_NR_adjtimex) || \
7122     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7123 static inline abi_long target_to_host_timex(struct timex *host_tx,
7124                                             abi_long target_addr)
7125 {
7126     struct target_timex *target_tx;
7127 
7128     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7129         return -TARGET_EFAULT;
7130     }
7131 
7132     __get_user(host_tx->modes, &target_tx->modes);
7133     __get_user(host_tx->offset, &target_tx->offset);
7134     __get_user(host_tx->freq, &target_tx->freq);
7135     __get_user(host_tx->maxerror, &target_tx->maxerror);
7136     __get_user(host_tx->esterror, &target_tx->esterror);
7137     __get_user(host_tx->status, &target_tx->status);
7138     __get_user(host_tx->constant, &target_tx->constant);
7139     __get_user(host_tx->precision, &target_tx->precision);
7140     __get_user(host_tx->tolerance, &target_tx->tolerance);
7141     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7142     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7143     __get_user(host_tx->tick, &target_tx->tick);
7144     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7145     __get_user(host_tx->jitter, &target_tx->jitter);
7146     __get_user(host_tx->shift, &target_tx->shift);
7147     __get_user(host_tx->stabil, &target_tx->stabil);
7148     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7149     __get_user(host_tx->calcnt, &target_tx->calcnt);
7150     __get_user(host_tx->errcnt, &target_tx->errcnt);
7151     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7152     __get_user(host_tx->tai, &target_tx->tai);
7153 
7154     unlock_user_struct(target_tx, target_addr, 0);
7155     return 0;
7156 }
7157 
7158 static inline abi_long host_to_target_timex(abi_long target_addr,
7159                                             struct timex *host_tx)
7160 {
7161     struct target_timex *target_tx;
7162 
7163     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7164         return -TARGET_EFAULT;
7165     }
7166 
7167     __put_user(host_tx->modes, &target_tx->modes);
7168     __put_user(host_tx->offset, &target_tx->offset);
7169     __put_user(host_tx->freq, &target_tx->freq);
7170     __put_user(host_tx->maxerror, &target_tx->maxerror);
7171     __put_user(host_tx->esterror, &target_tx->esterror);
7172     __put_user(host_tx->status, &target_tx->status);
7173     __put_user(host_tx->constant, &target_tx->constant);
7174     __put_user(host_tx->precision, &target_tx->precision);
7175     __put_user(host_tx->tolerance, &target_tx->tolerance);
7176     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7177     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7178     __put_user(host_tx->tick, &target_tx->tick);
7179     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7180     __put_user(host_tx->jitter, &target_tx->jitter);
7181     __put_user(host_tx->shift, &target_tx->shift);
7182     __put_user(host_tx->stabil, &target_tx->stabil);
7183     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7184     __put_user(host_tx->calcnt, &target_tx->calcnt);
7185     __put_user(host_tx->errcnt, &target_tx->errcnt);
7186     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7187     __put_user(host_tx->tai, &target_tx->tai);
7188 
7189     unlock_user_struct(target_tx, target_addr, 1);
7190     return 0;
7191 }
7192 #endif
7193 
7194 
7195 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7196 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7197                                               abi_long target_addr)
7198 {
7199     struct target__kernel_timex *target_tx;
7200 
7201     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7202                                  offsetof(struct target__kernel_timex,
7203                                           time))) {
7204         return -TARGET_EFAULT;
7205     }
7206 
7207     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7208         return -TARGET_EFAULT;
7209     }
7210 
7211     __get_user(host_tx->modes, &target_tx->modes);
7212     __get_user(host_tx->offset, &target_tx->offset);
7213     __get_user(host_tx->freq, &target_tx->freq);
7214     __get_user(host_tx->maxerror, &target_tx->maxerror);
7215     __get_user(host_tx->esterror, &target_tx->esterror);
7216     __get_user(host_tx->status, &target_tx->status);
7217     __get_user(host_tx->constant, &target_tx->constant);
7218     __get_user(host_tx->precision, &target_tx->precision);
7219     __get_user(host_tx->tolerance, &target_tx->tolerance);
7220     __get_user(host_tx->tick, &target_tx->tick);
7221     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7222     __get_user(host_tx->jitter, &target_tx->jitter);
7223     __get_user(host_tx->shift, &target_tx->shift);
7224     __get_user(host_tx->stabil, &target_tx->stabil);
7225     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7226     __get_user(host_tx->calcnt, &target_tx->calcnt);
7227     __get_user(host_tx->errcnt, &target_tx->errcnt);
7228     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7229     __get_user(host_tx->tai, &target_tx->tai);
7230 
7231     unlock_user_struct(target_tx, target_addr, 0);
7232     return 0;
7233 }
7234 
7235 static inline abi_long host_to_target_timex64(abi_long target_addr,
7236                                               struct timex *host_tx)
7237 {
7238     struct target__kernel_timex *target_tx;
7239 
7240     if (copy_to_user_timeval64(target_addr +
7241                                offsetof(struct target__kernel_timex, time),
7242                                &host_tx->time)) {
7243         return -TARGET_EFAULT;
7244     }
7245 
7246     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7247         return -TARGET_EFAULT;
7248     }
7249 
7250     __put_user(host_tx->modes, &target_tx->modes);
7251     __put_user(host_tx->offset, &target_tx->offset);
7252     __put_user(host_tx->freq, &target_tx->freq);
7253     __put_user(host_tx->maxerror, &target_tx->maxerror);
7254     __put_user(host_tx->esterror, &target_tx->esterror);
7255     __put_user(host_tx->status, &target_tx->status);
7256     __put_user(host_tx->constant, &target_tx->constant);
7257     __put_user(host_tx->precision, &target_tx->precision);
7258     __put_user(host_tx->tolerance, &target_tx->tolerance);
7259     __put_user(host_tx->tick, &target_tx->tick);
7260     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7261     __put_user(host_tx->jitter, &target_tx->jitter);
7262     __put_user(host_tx->shift, &target_tx->shift);
7263     __put_user(host_tx->stabil, &target_tx->stabil);
7264     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7265     __put_user(host_tx->calcnt, &target_tx->calcnt);
7266     __put_user(host_tx->errcnt, &target_tx->errcnt);
7267     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7268     __put_user(host_tx->tai, &target_tx->tai);
7269 
7270     unlock_user_struct(target_tx, target_addr, 1);
7271     return 0;
7272 }
7273 #endif
7274 
7275 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7276 #define sigev_notify_thread_id _sigev_un._tid
7277 #endif
7278 
7279 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7280                                                abi_ulong target_addr)
7281 {
7282     struct target_sigevent *target_sevp;
7283 
7284     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7285         return -TARGET_EFAULT;
7286     }
7287 
7288     /* This union is awkward on 64 bit systems because it has a 32 bit
7289      * integer and a pointer in it; we follow the conversion approach
7290      * used for handling sigval types in signal.c so the guest should get
7291      * the correct value back even if we did a 64 bit byteswap and it's
7292      * using the 32 bit integer.
7293      */
7294     host_sevp->sigev_value.sival_ptr =
7295         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7296     host_sevp->sigev_signo =
7297         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7298     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7299     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7300 
7301     unlock_user_struct(target_sevp, target_addr, 1);
7302     return 0;
7303 }
7304 
7305 #if defined(TARGET_NR_mlockall)
7306 static inline int target_to_host_mlockall_arg(int arg)
7307 {
7308     int result = 0;
7309 
7310     if (arg & TARGET_MCL_CURRENT) {
7311         result |= MCL_CURRENT;
7312     }
7313     if (arg & TARGET_MCL_FUTURE) {
7314         result |= MCL_FUTURE;
7315     }
7316 #ifdef MCL_ONFAULT
7317     if (arg & TARGET_MCL_ONFAULT) {
7318         result |= MCL_ONFAULT;
7319     }
7320 #endif
7321 
7322     return result;
7323 }
7324 #endif
7325 
7326 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7327      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7328      defined(TARGET_NR_newfstatat))
7329 static inline abi_long host_to_target_stat64(void *cpu_env,
7330                                              abi_ulong target_addr,
7331                                              struct stat *host_st)
7332 {
7333 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7334     if (((CPUARMState *)cpu_env)->eabi) {
7335         struct target_eabi_stat64 *target_st;
7336 
7337         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7338             return -TARGET_EFAULT;
7339         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7340         __put_user(host_st->st_dev, &target_st->st_dev);
7341         __put_user(host_st->st_ino, &target_st->st_ino);
7342 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7343         __put_user(host_st->st_ino, &target_st->__st_ino);
7344 #endif
7345         __put_user(host_st->st_mode, &target_st->st_mode);
7346         __put_user(host_st->st_nlink, &target_st->st_nlink);
7347         __put_user(host_st->st_uid, &target_st->st_uid);
7348         __put_user(host_st->st_gid, &target_st->st_gid);
7349         __put_user(host_st->st_rdev, &target_st->st_rdev);
7350         __put_user(host_st->st_size, &target_st->st_size);
7351         __put_user(host_st->st_blksize, &target_st->st_blksize);
7352         __put_user(host_st->st_blocks, &target_st->st_blocks);
7353         __put_user(host_st->st_atime, &target_st->target_st_atime);
7354         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7355         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7356 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7357         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7358         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7359         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7360 #endif
7361         unlock_user_struct(target_st, target_addr, 1);
7362     } else
7363 #endif
7364     {
7365 #if defined(TARGET_HAS_STRUCT_STAT64)
7366         struct target_stat64 *target_st;
7367 #else
7368         struct target_stat *target_st;
7369 #endif
7370 
7371         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7372             return -TARGET_EFAULT;
7373         memset(target_st, 0, sizeof(*target_st));
7374         __put_user(host_st->st_dev, &target_st->st_dev);
7375         __put_user(host_st->st_ino, &target_st->st_ino);
7376 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7377         __put_user(host_st->st_ino, &target_st->__st_ino);
7378 #endif
7379         __put_user(host_st->st_mode, &target_st->st_mode);
7380         __put_user(host_st->st_nlink, &target_st->st_nlink);
7381         __put_user(host_st->st_uid, &target_st->st_uid);
7382         __put_user(host_st->st_gid, &target_st->st_gid);
7383         __put_user(host_st->st_rdev, &target_st->st_rdev);
7384         /* XXX: better use of kernel struct */
7385         __put_user(host_st->st_size, &target_st->st_size);
7386         __put_user(host_st->st_blksize, &target_st->st_blksize);
7387         __put_user(host_st->st_blocks, &target_st->st_blocks);
7388         __put_user(host_st->st_atime, &target_st->target_st_atime);
7389         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7390         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7391 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7392         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7393         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7394         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7395 #endif
7396         unlock_user_struct(target_st, target_addr, 1);
7397     }
7398 
7399     return 0;
7400 }
7401 #endif
7402 
7403 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7404 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7405                                             abi_ulong target_addr)
7406 {
7407     struct target_statx *target_stx;
7408 
7409     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7410         return -TARGET_EFAULT;
7411     }
7412     memset(target_stx, 0, sizeof(*target_stx));
7413 
7414     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7415     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7416     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7417     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7418     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7419     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7420     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7421     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7422     __put_user(host_stx->stx_size, &target_stx->stx_size);
7423     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7424     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7425     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7426     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7427     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7428     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7429     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7430     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7431     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7432     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7433     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7434     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7435     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7436     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7437 
7438     unlock_user_struct(target_stx, target_addr, 1);
7439 
7440     return 0;
7441 }
7442 #endif
7443 
7444 static int do_sys_futex(int *uaddr, int op, int val,
7445                          const struct timespec *timeout, int *uaddr2,
7446                          int val3)
7447 {
7448 #if HOST_LONG_BITS == 64
7449 #if defined(__NR_futex)
7450     /* the host time_t is already 64-bit, so no _time64 variant is defined */
7451     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7452 
7453 #endif
7454 #else /* HOST_LONG_BITS == 64 */
7455 #if defined(__NR_futex_time64)
7456     if (sizeof(timeout->tv_sec) == 8) {
7457         /* _time64 function on 32bit arch */
7458         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7459     }
7460 #endif
7461 #if defined(__NR_futex)
7462     /* old function on 32bit arch */
7463     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7464 #endif
7465 #endif /* HOST_LONG_BITS == 64 */
7466     g_assert_not_reached();
7467 }
7468 
7469 static int do_safe_futex(int *uaddr, int op, int val,
7470                          const struct timespec *timeout, int *uaddr2,
7471                          int val3)
7472 {
7473 #if HOST_LONG_BITS == 64
7474 #if defined(__NR_futex)
7475     /* the host time_t is already 64-bit, so no _time64 variant is defined */
7476     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7477 #endif
7478 #else /* HOST_LONG_BITS == 64 */
7479 #if defined(__NR_futex_time64)
7480     if (sizeof(timeout->tv_sec) == 8) {
7481         /* _time64 function on 32bit arch */
7482         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7483                                            val3));
7484     }
7485 #endif
7486 #if defined(__NR_futex)
7487     /* old function on 32bit arch */
7488     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7489 #endif
7490 #endif /* HOST_LONG_BITS == 64 */
7491     return -TARGET_ENOSYS;
7492 }
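/*
 * Host futex selection: on a 64-bit host __NR_futex already takes a 64-bit
 * time_t, so it is used unconditionally.  On a 32-bit host we prefer
 * __NR_futex_time64 when the host struct timespec has a 64-bit tv_sec and
 * fall back to the classic __NR_futex otherwise; if neither syscall exists,
 * do_sys_futex() asserts and do_safe_futex() reports -TARGET_ENOSYS.
 */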
7493 
7494 /* ??? Using host futex calls even when target atomic operations
7495    are not really atomic probably breaks things.  However, implementing
7496    futexes locally would make futexes shared between multiple processes
7497    tricky; they would probably be useless anyway, because guest atomic
7498    operations won't work either.  */
7499 #if defined(TARGET_NR_futex)
7500 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7501                     target_ulong timeout, target_ulong uaddr2, int val3)
7502 {
7503     struct timespec ts, *pts;
7504     int base_op;
7505 
7506     /* ??? We assume FUTEX_* constants are the same on both host
7507        and target.  */
7508 #ifdef FUTEX_CMD_MASK
7509     base_op = op & FUTEX_CMD_MASK;
7510 #else
7511     base_op = op;
7512 #endif
7513     switch (base_op) {
7514     case FUTEX_WAIT:
7515     case FUTEX_WAIT_BITSET:
7516         if (timeout) {
7517             pts = &ts;
7518             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7519         } else {
7520             pts = NULL;
7521         }
7522         return do_safe_futex(g2h(cpu, uaddr),
7523                              op, tswap32(val), pts, NULL, val3);
7524     case FUTEX_WAKE:
7525         return do_safe_futex(g2h(cpu, uaddr),
7526                              op, val, NULL, NULL, 0);
7527     case FUTEX_FD:
7528         return do_safe_futex(g2h(cpu, uaddr),
7529                              op, val, NULL, NULL, 0);
7530     case FUTEX_REQUEUE:
7531     case FUTEX_CMP_REQUEUE:
7532     case FUTEX_WAKE_OP:
7533         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7534            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7535            But the prototype takes a `struct timespec *'; insert casts
7536            to satisfy the compiler.  We do not need to tswap TIMEOUT
7537            since it's not compared to guest memory.  */
7538         pts = (struct timespec *)(uintptr_t) timeout;
7539         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7540                              (base_op == FUTEX_CMP_REQUEUE
7541                               ? tswap32(val3) : val3));
7542     default:
7543         return -TARGET_ENOSYS;
7544     }
7545 }
7546 #endif
7547 
7548 #if defined(TARGET_NR_futex_time64)
7549 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7550                            int val, target_ulong timeout,
7551                            target_ulong uaddr2, int val3)
7552 {
7553     struct timespec ts, *pts;
7554     int base_op;
7555 
7556     /* ??? We assume FUTEX_* constants are the same on both host
7557        and target.  */
7558 #ifdef FUTEX_CMD_MASK
7559     base_op = op & FUTEX_CMD_MASK;
7560 #else
7561     base_op = op;
7562 #endif
7563     switch (base_op) {
7564     case FUTEX_WAIT:
7565     case FUTEX_WAIT_BITSET:
7566         if (timeout) {
7567             pts = &ts;
7568             if (target_to_host_timespec64(pts, timeout)) {
7569                 return -TARGET_EFAULT;
7570             }
7571         } else {
7572             pts = NULL;
7573         }
7574         return do_safe_futex(g2h(cpu, uaddr), op,
7575                              tswap32(val), pts, NULL, val3);
7576     case FUTEX_WAKE:
7577         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7578     case FUTEX_FD:
7579         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7580     case FUTEX_REQUEUE:
7581     case FUTEX_CMP_REQUEUE:
7582     case FUTEX_WAKE_OP:
7583         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7584            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7585            But the prototype takes a `struct timespec *'; insert casts
7586            to satisfy the compiler.  We do not need to tswap TIMEOUT
7587            since it's not compared to guest memory.  */
7588         pts = (struct timespec *)(uintptr_t) timeout;
7589         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7590                              (base_op == FUTEX_CMP_REQUEUE
7591                               ? tswap32(val3) : val3));
7592     default:
7593         return -TARGET_ENOSYS;
7594     }
7595 }
7596 #endif
7597 
7598 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7599 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7600                                      abi_long handle, abi_long mount_id,
7601                                      abi_long flags)
7602 {
7603     struct file_handle *target_fh;
7604     struct file_handle *fh;
7605     int mid = 0;
7606     abi_long ret;
7607     char *name;
7608     unsigned int size, total_size;
7609 
7610     if (get_user_s32(size, handle)) {
7611         return -TARGET_EFAULT;
7612     }
7613 
7614     name = lock_user_string(pathname);
7615     if (!name) {
7616         return -TARGET_EFAULT;
7617     }
7618 
7619     total_size = sizeof(struct file_handle) + size;
7620     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7621     if (!target_fh) {
7622         unlock_user(name, pathname, 0);
7623         return -TARGET_EFAULT;
7624     }
7625 
7626     fh = g_malloc0(total_size);
7627     fh->handle_bytes = size;
7628 
7629     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7630     unlock_user(name, pathname, 0);
7631 
7632     /* man name_to_handle_at(2):
7633      * Other than the use of the handle_bytes field, the caller should treat
7634      * the file_handle structure as an opaque data type
7635      */
7636 
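    /*
     * Only the two header fields are byte-swapped for the guest; the opaque
     * f_handle bytes that follow them are copied through verbatim, as the
     * man page note above requires.
     */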
7637     memcpy(target_fh, fh, total_size);
7638     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7639     target_fh->handle_type = tswap32(fh->handle_type);
7640     g_free(fh);
7641     unlock_user(target_fh, handle, total_size);
7642 
7643     if (put_user_s32(mid, mount_id)) {
7644         return -TARGET_EFAULT;
7645     }
7646 
7647     return ret;
7648 
7649 }
7650 #endif
7651 
7652 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7653 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7654                                      abi_long flags)
7655 {
7656     struct file_handle *target_fh;
7657     struct file_handle *fh;
7658     unsigned int size, total_size;
7659     abi_long ret;
7660 
7661     if (get_user_s32(size, handle)) {
7662         return -TARGET_EFAULT;
7663     }
7664 
7665     total_size = sizeof(struct file_handle) + size;
7666     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7667     if (!target_fh) {
7668         return -TARGET_EFAULT;
7669     }
7670 
7671     fh = g_memdup(target_fh, total_size);
7672     fh->handle_bytes = size;
7673     fh->handle_type = tswap32(target_fh->handle_type);
7674 
7675     ret = get_errno(open_by_handle_at(mount_fd, fh,
7676                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7677 
7678     g_free(fh);
7679 
7680     unlock_user(target_fh, handle, total_size);
7681 
7682     return ret;
7683 }
7684 #endif
7685 
7686 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7687 
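/*
 * Common implementation for signalfd and signalfd4: convert the guest signal
 * mask and O_* flags to host values, then register the new fd with fd_trans
 * so that data later read from it can be translated back into the target's
 * signalfd_siginfo layout.
 */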
7688 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7689 {
7690     int host_flags;
7691     target_sigset_t *target_mask;
7692     sigset_t host_mask;
7693     abi_long ret;
7694 
7695     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7696         return -TARGET_EINVAL;
7697     }
7698     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7699         return -TARGET_EFAULT;
7700     }
7701 
7702     target_to_host_sigset(&host_mask, target_mask);
7703 
7704     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7705 
7706     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7707     if (ret >= 0) {
7708         fd_trans_register(ret, &target_signalfd_trans);
7709     }
7710 
7711     unlock_user_struct(target_mask, mask, 0);
7712 
7713     return ret;
7714 }
7715 #endif
7716 
7717 /* Map host to target signal numbers for the wait family of syscalls.
7718    Assume all other status bits are the same.  */
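/* For example, a child stopped by a signal has status (sig << 8) | 0x7f and a
   child killed by a signal carries the signal in the low 7 bits, so only the
   signal number itself needs remapping.  */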
7719 int host_to_target_waitstatus(int status)
7720 {
7721     if (WIFSIGNALED(status)) {
7722         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7723     }
7724     if (WIFSTOPPED(status)) {
7725         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7726                | (status & 0xff);
7727     }
7728     return status;
7729 }
7730 
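/*
 * The open_self_*() helpers below synthesize the contents of a few
 * /proc/self/ entries for the guest; do_openat() further down writes their
 * output into an unlinked temporary file and hands that fd back instead of
 * the real procfs file.
 */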
7731 static int open_self_cmdline(void *cpu_env, int fd)
7732 {
7733     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7734     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7735     int i;
7736 
7737     for (i = 0; i < bprm->argc; i++) {
7738         size_t len = strlen(bprm->argv[i]) + 1;
7739 
7740         if (write(fd, bprm->argv[i], len) != len) {
7741             return -1;
7742         }
7743     }
7744 
7745     return 0;
7746 }
7747 
7748 static int open_self_maps(void *cpu_env, int fd)
7749 {
7750     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7751     TaskState *ts = cpu->opaque;
7752     GSList *map_info = read_self_maps();
7753     GSList *s;
7754     int count;
7755 
7756     for (s = map_info; s; s = g_slist_next(s)) {
7757         MapInfo *e = (MapInfo *) s->data;
7758 
7759         if (h2g_valid(e->start)) {
7760             unsigned long min = e->start;
7761             unsigned long max = e->end;
7762             int flags = page_get_flags(h2g(min));
7763             const char *path;
7764 
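            /* If the host mapping extends past the guest address space,
             * report its end clamped to one byte past the last guest-visible
             * address. */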
7765             max = h2g_valid(max - 1) ?
7766                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7767 
7768             if (page_check_range(h2g(min), max - min, flags) == -1) {
7769                 continue;
7770             }
7771 
7772             if (h2g(min) == ts->info->stack_limit) {
7773                 path = "[stack]";
7774             } else {
7775                 path = e->path;
7776             }
7777 
7778             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7779                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7780                             h2g(min), h2g(max - 1) + 1,
7781                             (flags & PAGE_READ) ? 'r' : '-',
7782                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7783                             (flags & PAGE_EXEC) ? 'x' : '-',
7784                             e->is_priv ? 'p' : '-',
7785                             (uint64_t) e->offset, e->dev, e->inode);
7786             if (path) {
7787                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7788             } else {
7789                 dprintf(fd, "\n");
7790             }
7791         }
7792     }
7793 
7794     free_self_maps(map_info);
7795 
7796 #ifdef TARGET_VSYSCALL_PAGE
7797     /*
7798      * We only support execution from the vsyscall page.
7799      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7800      */
7801     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7802                     " --xp 00000000 00:00 0",
7803                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7804     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7805 #endif
7806 
7807     return 0;
7808 }
7809 
7810 static int open_self_stat(void *cpu_env, int fd)
7811 {
7812     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7813     TaskState *ts = cpu->opaque;
7814     g_autoptr(GString) buf = g_string_new(NULL);
7815     int i;
7816 
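    /*
     * Emit the 44 space-separated fields of /proc/self/stat.  Only pid,
     * comm, ppid and (per proc(5)) field 28, the start of the stack, carry
     * real values; everything else is reported as 0.
     */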
7817     for (i = 0; i < 44; i++) {
7818         if (i == 0) {
7819             /* pid */
7820             g_string_printf(buf, FMT_pid " ", getpid());
7821         } else if (i == 1) {
7822             /* app name */
7823             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7824             bin = bin ? bin + 1 : ts->bprm->argv[0];
7825             g_string_printf(buf, "(%.15s) ", bin);
7826         } else if (i == 3) {
7827             /* ppid */
7828             g_string_printf(buf, FMT_pid " ", getppid());
7829         } else if (i == 27) {
7830             /* stack bottom */
7831             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7832         } else {
7833             /* every remaining field is not emulated and is reported as 0 */
7834             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7835         }
7836 
7837         if (write(fd, buf->str, buf->len) != buf->len) {
7838             return -1;
7839         }
7840     }
7841 
7842     return 0;
7843 }
7844 
7845 static int open_self_auxv(void *cpu_env, int fd)
7846 {
7847     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7848     TaskState *ts = cpu->opaque;
7849     abi_ulong auxv = ts->info->saved_auxv;
7850     abi_ulong len = ts->info->auxv_len;
7851     char *ptr;
7852 
7853     /*
7854      * The auxiliary vector is stored on the target process's stack;
7855      * read the whole vector in and copy it out to the file.
7856      */
7857     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7858     if (ptr != NULL) {
7859         while (len > 0) {
7860             ssize_t r;
7861             r = write(fd, ptr, len);
7862             if (r <= 0) {
7863                 break;
7864             }
7865             len -= r;
7866             ptr += r;
7867         }
7868         lseek(fd, 0, SEEK_SET);
7869         unlock_user(ptr, auxv, len);
7870     }
7871 
7872     return 0;
7873 }
7874 
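/*
 * Return non-zero if FILENAME refers to ENTRY in the emulated process's own
 * /proc directory, i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>".
 */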
7875 static int is_proc_myself(const char *filename, const char *entry)
7876 {
7877     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7878         filename += strlen("/proc/");
7879         if (!strncmp(filename, "self/", strlen("self/"))) {
7880             filename += strlen("self/");
7881         } else if (*filename >= '1' && *filename <= '9') {
7882             char myself[80];
7883             snprintf(myself, sizeof(myself), "%d/", getpid());
7884             if (!strncmp(filename, myself, strlen(myself))) {
7885                 filename += strlen(myself);
7886             } else {
7887                 return 0;
7888             }
7889         } else {
7890             return 0;
7891         }
7892         if (!strcmp(filename, entry)) {
7893             return 1;
7894         }
7895     }
7896     return 0;
7897 }
7898 
7899 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7900     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7901 static int is_proc(const char *filename, const char *entry)
7902 {
7903     return strcmp(filename, entry) == 0;
7904 }
7905 #endif
7906 
7907 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
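/*
 * When host and target endianness differ, the 32-bit destination, gateway
 * and netmask fields parsed from the host's /proc/net/route are byte-swapped
 * so the guest reads them in its own byte order; the remaining fields are
 * copied through unchanged.
 */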
7908 static int open_net_route(void *cpu_env, int fd)
7909 {
7910     FILE *fp;
7911     char *line = NULL;
7912     size_t len = 0;
7913     ssize_t read;
7914 
7915     fp = fopen("/proc/net/route", "r");
7916     if (fp == NULL) {
7917         return -1;
7918     }
7919 
7920     /* read header */
7921 
7922     read = getline(&line, &len, fp);
7923     dprintf(fd, "%s", line);
7924 
7925     /* read routes */
7926 
7927     while ((read = getline(&line, &len, fp)) != -1) {
7928         char iface[16];
7929         uint32_t dest, gw, mask;
7930         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7931         int fields;
7932 
7933         fields = sscanf(line,
7934                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7935                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7936                         &mask, &mtu, &window, &irtt);
7937         if (fields != 11) {
7938             continue;
7939         }
7940         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7941                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7942                 metric, tswap32(mask), mtu, window, irtt);
7943     }
7944 
7945     free(line);
7946     fclose(fp);
7947 
7948     return 0;
7949 }
7950 #endif
7951 
7952 #if defined(TARGET_SPARC)
7953 static int open_cpuinfo(void *cpu_env, int fd)
7954 {
7955     dprintf(fd, "type\t\t: sun4u\n");
7956     return 0;
7957 }
7958 #endif
7959 
7960 #if defined(TARGET_HPPA)
7961 static int open_cpuinfo(void *cpu_env, int fd)
7962 {
7963     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7964     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7965     dprintf(fd, "capabilities\t: os32\n");
7966     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7967     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7968     return 0;
7969 }
7970 #endif
7971 
7972 #if defined(TARGET_M68K)
7973 static int open_hardware(void *cpu_env, int fd)
7974 {
7975     dprintf(fd, "Model:\t\tqemu-m68k\n");
7976     return 0;
7977 }
7978 #endif
7979 
7980 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7981 {
7982     struct fake_open {
7983         const char *filename;
7984         int (*fill)(void *cpu_env, int fd);
7985         int (*cmp)(const char *s1, const char *s2);
7986     };
7987     const struct fake_open *fake_open;
7988     static const struct fake_open fakes[] = {
7989         { "maps", open_self_maps, is_proc_myself },
7990         { "stat", open_self_stat, is_proc_myself },
7991         { "auxv", open_self_auxv, is_proc_myself },
7992         { "cmdline", open_self_cmdline, is_proc_myself },
7993 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7994         { "/proc/net/route", open_net_route, is_proc },
7995 #endif
7996 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7997         { "/proc/cpuinfo", open_cpuinfo, is_proc },
7998 #endif
7999 #if defined(TARGET_M68K)
8000         { "/proc/hardware", open_hardware, is_proc },
8001 #endif
8002         { NULL, NULL, NULL }
8003     };
8004 
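    /*
     * /proc/self/exe (and /proc/<pid>/exe) is special-cased: reuse the fd the
     * loader was handed via AT_EXECFD if there is one, otherwise re-open the
     * emulated binary at exec_path.
     */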
8005     if (is_proc_myself(pathname, "exe")) {
8006         int execfd = qemu_getauxval(AT_EXECFD);
8007         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8008     }
8009 
8010     for (fake_open = fakes; fake_open->filename; fake_open++) {
8011         if (fake_open->cmp(pathname, fake_open->filename)) {
8012             break;
8013         }
8014     }
8015 
8016     if (fake_open->filename) {
8017         const char *tmpdir;
8018         char filename[PATH_MAX];
8019         int fd, r;
8020 
8021         /* create a temporary file to hold the synthesized entry's contents */
8022         tmpdir = getenv("TMPDIR");
8023         if (!tmpdir)
8024             tmpdir = "/tmp";
8025         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8026         fd = mkstemp(filename);
8027         if (fd < 0) {
8028             return fd;
8029         }
8030         unlink(filename);
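        /* The file is unlinked straight away: the guest only ever sees the
         * open fd, which is rewound to the start once the fake contents have
         * been written. */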
8031 
8032         if ((r = fake_open->fill(cpu_env, fd))) {
8033             int e = errno;
8034             close(fd);
8035             errno = e;
8036             return r;
8037         }
8038         lseek(fd, 0, SEEK_SET);
8039 
8040         return fd;
8041     }
8042 
8043     return safe_openat(dirfd, path(pathname), flags, mode);
8044 }
8045 
8046 #define TIMER_MAGIC 0x0caf0000
8047 #define TIMER_MAGIC_MASK 0xffff0000
8048 
8049 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
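/* E.g. a guest-visible id of 0x0caf0002 refers to slot 2 of g_posix_timers;
   any value whose upper half is not TIMER_MAGIC is rejected with EINVAL. */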
8050 static target_timer_t get_timer_id(abi_long arg)
8051 {
8052     target_timer_t timerid = arg;
8053 
8054     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8055         return -TARGET_EINVAL;
8056     }
8057 
8058     timerid &= 0xffff;
8059 
8060     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8061         return -TARGET_EINVAL;
8062     }
8063 
8064     return timerid;
8065 }
8066 
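/*
 * Copy a CPU affinity mask between the target's abi_ulong layout and the
 * host's unsigned long layout one bit at a time, so differing word sizes
 * (e.g. a 32-bit guest on a 64-bit host) and byte orders are handled
 * uniformly.
 */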
8067 static int target_to_host_cpu_mask(unsigned long *host_mask,
8068                                    size_t host_size,
8069                                    abi_ulong target_addr,
8070                                    size_t target_size)
8071 {
8072     unsigned target_bits = sizeof(abi_ulong) * 8;
8073     unsigned host_bits = sizeof(*host_mask) * 8;
8074     abi_ulong *target_mask;
8075     unsigned i, j;
8076 
8077     assert(host_size >= target_size);
8078 
8079     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8080     if (!target_mask) {
8081         return -TARGET_EFAULT;
8082     }
8083     memset(host_mask, 0, host_size);
8084 
8085     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8086         unsigned bit = i * target_bits;
8087         abi_ulong val;
8088 
8089         __get_user(val, &target_mask[i]);
8090         for (j = 0; j < target_bits; j++, bit++) {
8091             if (val & (1UL << j)) {
8092                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8093             }
8094         }
8095     }
8096 
8097     unlock_user(target_mask, target_addr, 0);
8098     return 0;
8099 }
8100 
8101 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8102                                    size_t host_size,
8103                                    abi_ulong target_addr,
8104                                    size_t target_size)
8105 {
8106     unsigned target_bits = sizeof(abi_ulong) * 8;
8107     unsigned host_bits = sizeof(*host_mask) * 8;
8108     abi_ulong *target_mask;
8109     unsigned i, j;
8110 
8111     assert(host_size >= target_size);
8112 
8113     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8114     if (!target_mask) {
8115         return -TARGET_EFAULT;
8116     }
8117 
8118     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8119         unsigned bit = i * target_bits;
8120         abi_ulong val = 0;
8121 
8122         for (j = 0; j < target_bits; j++, bit++) {
8123             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8124                 val |= 1UL << j;
8125             }
8126         }
8127         __put_user(val, &target_mask[i]);
8128     }
8129 
8130     unlock_user(target_mask, target_addr, target_size);
8131     return 0;
8132 }
8133 
8134 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8135 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8136 #endif
8137 
8138 /* This is an internal helper for do_syscall so that there is a single
8139  * return point, which makes it easier to perform actions such as
8140  * logging of syscall results.
8141  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8142  */
8143 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8144                             abi_long arg2, abi_long arg3, abi_long arg4,
8145                             abi_long arg5, abi_long arg6, abi_long arg7,
8146                             abi_long arg8)
8147 {
8148     CPUState *cpu = env_cpu(cpu_env);
8149     abi_long ret;
8150 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8151     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8152     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8153     || defined(TARGET_NR_statx)
8154     struct stat st;
8155 #endif
8156 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8157     || defined(TARGET_NR_fstatfs)
8158     struct statfs stfs;
8159 #endif
8160     void *p;
8161 
8162     switch(num) {
8163     case TARGET_NR_exit:
8164         /* In old applications this may be used to implement _exit(2).
8165            However, in threaded applications it is used for thread termination,
8166            and _exit_group is used for application termination.
8167            Do thread termination if we have more than one thread.  */
8168 
8169         if (block_signals()) {
8170             return -TARGET_ERESTARTSYS;
8171         }
8172 
8173         pthread_mutex_lock(&clone_lock);
8174 
8175         if (CPU_NEXT(first_cpu)) {
8176             TaskState *ts = cpu->opaque;
8177 
8178             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8179             object_unref(OBJECT(cpu));
8180             /*
8181              * At this point the CPU should be unrealized and removed
8182              * from cpu lists. We can clean-up the rest of the thread
8183              * data without the lock held.
8184              */
8185 
8186             pthread_mutex_unlock(&clone_lock);
8187 
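            /* Honour CLONE_CHILD_CLEARTID semantics: clear the child tid
             * word and futex-wake anyone (e.g. a pthread_join() caller)
             * waiting on it. */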
8188             if (ts->child_tidptr) {
8189                 put_user_u32(0, ts->child_tidptr);
8190                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8191                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8192             }
8193             thread_cpu = NULL;
8194             g_free(ts);
8195             rcu_unregister_thread();
8196             pthread_exit(NULL);
8197         }
8198 
8199         pthread_mutex_unlock(&clone_lock);
8200         preexit_cleanup(cpu_env, arg1);
8201         _exit(arg1);
8202         return 0; /* avoid warning */
8203     case TARGET_NR_read:
8204         if (arg2 == 0 && arg3 == 0) {
8205             return get_errno(safe_read(arg1, 0, 0));
8206         } else {
8207             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8208                 return -TARGET_EFAULT;
8209             ret = get_errno(safe_read(arg1, p, arg3));
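            /* An fd_trans hook, if one was registered for this fd (e.g. by
             * do_signalfd4() above), rewrites the host data into the layout
             * the target expects before it is copied back to the guest. */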
8210             if (ret >= 0 &&
8211                 fd_trans_host_to_target_data(arg1)) {
8212                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8213             }
8214             unlock_user(p, arg2, ret);
8215         }
8216         return ret;
8217     case TARGET_NR_write:
8218         if (arg2 == 0 && arg3 == 0) {
8219             return get_errno(safe_write(arg1, 0, 0));
8220         }
8221         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8222             return -TARGET_EFAULT;
8223         if (fd_trans_target_to_host_data(arg1)) {
8224             void *copy = g_malloc(arg3);
8225             memcpy(copy, p, arg3);
8226             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8227             if (ret >= 0) {
8228                 ret = get_errno(safe_write(arg1, copy, ret));
8229             }
8230             g_free(copy);
8231         } else {
8232             ret = get_errno(safe_write(arg1, p, arg3));
8233         }
8234         unlock_user(p, arg2, 0);
8235         return ret;
8236 
8237 #ifdef TARGET_NR_open
8238     case TARGET_NR_open:
8239         if (!(p = lock_user_string(arg1)))
8240             return -TARGET_EFAULT;
8241         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8242                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8243                                   arg3));
8244         fd_trans_unregister(ret);
8245         unlock_user(p, arg1, 0);
8246         return ret;
8247 #endif
8248     case TARGET_NR_openat:
8249         if (!(p = lock_user_string(arg2)))
8250             return -TARGET_EFAULT;
8251         ret = get_errno(do_openat(cpu_env, arg1, p,
8252                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8253                                   arg4));
8254         fd_trans_unregister(ret);
8255         unlock_user(p, arg2, 0);
8256         return ret;
8257 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8258     case TARGET_NR_name_to_handle_at:
8259         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8260         return ret;
8261 #endif
8262 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8263     case TARGET_NR_open_by_handle_at:
8264         ret = do_open_by_handle_at(arg1, arg2, arg3);
8265         fd_trans_unregister(ret);
8266         return ret;
8267 #endif
8268     case TARGET_NR_close:
8269         fd_trans_unregister(arg1);
8270         return get_errno(close(arg1));
8271 
8272     case TARGET_NR_brk:
8273         return do_brk(arg1);
8274 #ifdef TARGET_NR_fork
8275     case TARGET_NR_fork:
8276         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8277 #endif
8278 #ifdef TARGET_NR_waitpid
8279     case TARGET_NR_waitpid:
8280         {
8281             int status;
8282             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8283             if (!is_error(ret) && arg2 && ret
8284                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8285                 return -TARGET_EFAULT;
8286         }
8287         return ret;
8288 #endif
8289 #ifdef TARGET_NR_waitid
8290     case TARGET_NR_waitid:
8291         {
8292             siginfo_t info;
8293             info.si_pid = 0;
8294             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8295             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8296                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8297                     return -TARGET_EFAULT;
8298                 host_to_target_siginfo(p, &info);
8299                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8300             }
8301         }
8302         return ret;
8303 #endif
8304 #ifdef TARGET_NR_creat /* not on alpha */
8305     case TARGET_NR_creat:
8306         if (!(p = lock_user_string(arg1)))
8307             return -TARGET_EFAULT;
8308         ret = get_errno(creat(p, arg2));
8309         fd_trans_unregister(ret);
8310         unlock_user(p, arg1, 0);
8311         return ret;
8312 #endif
8313 #ifdef TARGET_NR_link
8314     case TARGET_NR_link:
8315         {
8316             void * p2;
8317             p = lock_user_string(arg1);
8318             p2 = lock_user_string(arg2);
8319             if (!p || !p2)
8320                 ret = -TARGET_EFAULT;
8321             else
8322                 ret = get_errno(link(p, p2));
8323             unlock_user(p2, arg2, 0);
8324             unlock_user(p, arg1, 0);
8325         }
8326         return ret;
8327 #endif
8328 #if defined(TARGET_NR_linkat)
8329     case TARGET_NR_linkat:
8330         {
8331             void * p2 = NULL;
8332             if (!arg2 || !arg4)
8333                 return -TARGET_EFAULT;
8334             p  = lock_user_string(arg2);
8335             p2 = lock_user_string(arg4);
8336             if (!p || !p2)
8337                 ret = -TARGET_EFAULT;
8338             else
8339                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8340             unlock_user(p, arg2, 0);
8341             unlock_user(p2, arg4, 0);
8342         }
8343         return ret;
8344 #endif
8345 #ifdef TARGET_NR_unlink
8346     case TARGET_NR_unlink:
8347         if (!(p = lock_user_string(arg1)))
8348             return -TARGET_EFAULT;
8349         ret = get_errno(unlink(p));
8350         unlock_user(p, arg1, 0);
8351         return ret;
8352 #endif
8353 #if defined(TARGET_NR_unlinkat)
8354     case TARGET_NR_unlinkat:
8355         if (!(p = lock_user_string(arg2)))
8356             return -TARGET_EFAULT;
8357         ret = get_errno(unlinkat(arg1, p, arg3));
8358         unlock_user(p, arg2, 0);
8359         return ret;
8360 #endif
8361     case TARGET_NR_execve:
8362         {
8363             char **argp, **envp;
8364             int argc, envc;
8365             abi_ulong gp;
8366             abi_ulong guest_argp;
8367             abi_ulong guest_envp;
8368             abi_ulong addr;
8369             char **q;
8370 
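            /* Count the NULL-terminated guest argv and envp vectors first so
             * that suitably sized host arrays can be allocated, then lock
             * each string into host memory. */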
8371             argc = 0;
8372             guest_argp = arg2;
8373             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8374                 if (get_user_ual(addr, gp))
8375                     return -TARGET_EFAULT;
8376                 if (!addr)
8377                     break;
8378                 argc++;
8379             }
8380             envc = 0;
8381             guest_envp = arg3;
8382             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8383                 if (get_user_ual(addr, gp))
8384                     return -TARGET_EFAULT;
8385                 if (!addr)
8386                     break;
8387                 envc++;
8388             }
8389 
8390             argp = g_new0(char *, argc + 1);
8391             envp = g_new0(char *, envc + 1);
8392 
8393             for (gp = guest_argp, q = argp; gp;
8394                   gp += sizeof(abi_ulong), q++) {
8395                 if (get_user_ual(addr, gp))
8396                     goto execve_efault;
8397                 if (!addr)
8398                     break;
8399                 if (!(*q = lock_user_string(addr)))
8400                     goto execve_efault;
8401             }
8402             *q = NULL;
8403 
8404             for (gp = guest_envp, q = envp; gp;
8405                   gp += sizeof(abi_ulong), q++) {
8406                 if (get_user_ual(addr, gp))
8407                     goto execve_efault;
8408                 if (!addr)
8409                     break;
8410                 if (!(*q = lock_user_string(addr)))
8411                     goto execve_efault;
8412             }
8413             *q = NULL;
8414 
8415             if (!(p = lock_user_string(arg1)))
8416                 goto execve_efault;
8417             /* Although execve() is not an interruptible syscall it is
8418              * a special case where we must use the safe_syscall wrapper:
8419              * if we allow a signal to happen before we make the host
8420              * syscall then we will 'lose' it, because at the point of
8421              * execve the process leaves QEMU's control. So we use the
8422              * safe syscall wrapper to ensure that we either take the
8423              * signal as a guest signal, or else it does not happen
8424              * before the execve completes and makes it the other
8425              * program's problem.
8426              */
8427             ret = get_errno(safe_execve(p, argp, envp));
8428             unlock_user(p, arg1, 0);
8429 
8430             goto execve_end;
8431 
8432         execve_efault:
8433             ret = -TARGET_EFAULT;
8434 
8435         execve_end:
8436             for (gp = guest_argp, q = argp; *q;
8437                   gp += sizeof(abi_ulong), q++) {
8438                 if (get_user_ual(addr, gp)
8439                     || !addr)
8440                     break;
8441                 unlock_user(*q, addr, 0);
8442             }
8443             for (gp = guest_envp, q = envp; *q;
8444                   gp += sizeof(abi_ulong), q++) {
8445                 if (get_user_ual(addr, gp)
8446                     || !addr)
8447                     break;
8448                 unlock_user(*q, addr, 0);
8449             }
8450 
8451             g_free(argp);
8452             g_free(envp);
8453         }
8454         return ret;
8455     case TARGET_NR_chdir:
8456         if (!(p = lock_user_string(arg1)))
8457             return -TARGET_EFAULT;
8458         ret = get_errno(chdir(p));
8459         unlock_user(p, arg1, 0);
8460         return ret;
8461 #ifdef TARGET_NR_time
8462     case TARGET_NR_time:
8463         {
8464             time_t host_time;
8465             ret = get_errno(time(&host_time));
8466             if (!is_error(ret)
8467                 && arg1
8468                 && put_user_sal(host_time, arg1))
8469                 return -TARGET_EFAULT;
8470         }
8471         return ret;
8472 #endif
8473 #ifdef TARGET_NR_mknod
8474     case TARGET_NR_mknod:
8475         if (!(p = lock_user_string(arg1)))
8476             return -TARGET_EFAULT;
8477         ret = get_errno(mknod(p, arg2, arg3));
8478         unlock_user(p, arg1, 0);
8479         return ret;
8480 #endif
8481 #if defined(TARGET_NR_mknodat)
8482     case TARGET_NR_mknodat:
8483         if (!(p = lock_user_string(arg2)))
8484             return -TARGET_EFAULT;
8485         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8486         unlock_user(p, arg2, 0);
8487         return ret;
8488 #endif
8489 #ifdef TARGET_NR_chmod
8490     case TARGET_NR_chmod:
8491         if (!(p = lock_user_string(arg1)))
8492             return -TARGET_EFAULT;
8493         ret = get_errno(chmod(p, arg2));
8494         unlock_user(p, arg1, 0);
8495         return ret;
8496 #endif
8497 #ifdef TARGET_NR_lseek
8498     case TARGET_NR_lseek:
8499         return get_errno(lseek(arg1, arg2, arg3));
8500 #endif
8501 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8502     /* Alpha specific */
8503     case TARGET_NR_getxpid:
8504         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8505         return get_errno(getpid());
8506 #endif
8507 #ifdef TARGET_NR_getpid
8508     case TARGET_NR_getpid:
8509         return get_errno(getpid());
8510 #endif
8511     case TARGET_NR_mount:
8512         {
8513             /* need to look at the data field */
8514             void *p2, *p3;
8515 
8516             if (arg1) {
8517                 p = lock_user_string(arg1);
8518                 if (!p) {
8519                     return -TARGET_EFAULT;
8520                 }
8521             } else {
8522                 p = NULL;
8523             }
8524 
8525             p2 = lock_user_string(arg2);
8526             if (!p2) {
8527                 if (arg1) {
8528                     unlock_user(p, arg1, 0);
8529                 }
8530                 return -TARGET_EFAULT;
8531             }
8532 
8533             if (arg3) {
8534                 p3 = lock_user_string(arg3);
8535                 if (!p3) {
8536                     if (arg1) {
8537                         unlock_user(p, arg1, 0);
8538                     }
8539                     unlock_user(p2, arg2, 0);
8540                     return -TARGET_EFAULT;
8541                 }
8542             } else {
8543                 p3 = NULL;
8544             }
8545 
8546             /* FIXME - arg5 should be locked, but it isn't clear how to
8547              * do that since it's not guaranteed to be a NULL-terminated
8548              * string.
8549              */
8550             if (!arg5) {
8551                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8552             } else {
8553                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8554             }
8555             ret = get_errno(ret);
8556 
8557             if (arg1) {
8558                 unlock_user(p, arg1, 0);
8559             }
8560             unlock_user(p2, arg2, 0);
8561             if (arg3) {
8562                 unlock_user(p3, arg3, 0);
8563             }
8564         }
8565         return ret;
8566 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8567 #if defined(TARGET_NR_umount)
8568     case TARGET_NR_umount:
8569 #endif
8570 #if defined(TARGET_NR_oldumount)
8571     case TARGET_NR_oldumount:
8572 #endif
8573         if (!(p = lock_user_string(arg1)))
8574             return -TARGET_EFAULT;
8575         ret = get_errno(umount(p));
8576         unlock_user(p, arg1, 0);
8577         return ret;
8578 #endif
8579 #ifdef TARGET_NR_stime /* not on alpha */
8580     case TARGET_NR_stime:
8581         {
8582             struct timespec ts;
8583             ts.tv_nsec = 0;
8584             if (get_user_sal(ts.tv_sec, arg1)) {
8585                 return -TARGET_EFAULT;
8586             }
8587             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8588         }
8589 #endif
8590 #ifdef TARGET_NR_alarm /* not on alpha */
8591     case TARGET_NR_alarm:
8592         return alarm(arg1);
8593 #endif
8594 #ifdef TARGET_NR_pause /* not on alpha */
8595     case TARGET_NR_pause:
8596         if (!block_signals()) {
8597             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8598         }
8599         return -TARGET_EINTR;
8600 #endif
8601 #ifdef TARGET_NR_utime
8602     case TARGET_NR_utime:
8603         {
8604             struct utimbuf tbuf, *host_tbuf;
8605             struct target_utimbuf *target_tbuf;
8606             if (arg2) {
8607                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8608                     return -TARGET_EFAULT;
8609                 tbuf.actime = tswapal(target_tbuf->actime);
8610                 tbuf.modtime = tswapal(target_tbuf->modtime);
8611                 unlock_user_struct(target_tbuf, arg2, 0);
8612                 host_tbuf = &tbuf;
8613             } else {
8614                 host_tbuf = NULL;
8615             }
8616             if (!(p = lock_user_string(arg1)))
8617                 return -TARGET_EFAULT;
8618             ret = get_errno(utime(p, host_tbuf));
8619             unlock_user(p, arg1, 0);
8620         }
8621         return ret;
8622 #endif
8623 #ifdef TARGET_NR_utimes
8624     case TARGET_NR_utimes:
8625         {
8626             struct timeval *tvp, tv[2];
8627             if (arg2) {
8628                 if (copy_from_user_timeval(&tv[0], arg2)
8629                     || copy_from_user_timeval(&tv[1],
8630                                               arg2 + sizeof(struct target_timeval)))
8631                     return -TARGET_EFAULT;
8632                 tvp = tv;
8633             } else {
8634                 tvp = NULL;
8635             }
8636             if (!(p = lock_user_string(arg1)))
8637                 return -TARGET_EFAULT;
8638             ret = get_errno(utimes(p, tvp));
8639             unlock_user(p, arg1, 0);
8640         }
8641         return ret;
8642 #endif
8643 #if defined(TARGET_NR_futimesat)
8644     case TARGET_NR_futimesat:
8645         {
8646             struct timeval *tvp, tv[2];
8647             if (arg3) {
8648                 if (copy_from_user_timeval(&tv[0], arg3)
8649                     || copy_from_user_timeval(&tv[1],
8650                                               arg3 + sizeof(struct target_timeval)))
8651                     return -TARGET_EFAULT;
8652                 tvp = tv;
8653             } else {
8654                 tvp = NULL;
8655             }
8656             if (!(p = lock_user_string(arg2))) {
8657                 return -TARGET_EFAULT;
8658             }
8659             ret = get_errno(futimesat(arg1, path(p), tvp));
8660             unlock_user(p, arg2, 0);
8661         }
8662         return ret;
8663 #endif
8664 #ifdef TARGET_NR_access
8665     case TARGET_NR_access:
8666         if (!(p = lock_user_string(arg1))) {
8667             return -TARGET_EFAULT;
8668         }
8669         ret = get_errno(access(path(p), arg2));
8670         unlock_user(p, arg1, 0);
8671         return ret;
8672 #endif
8673 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8674     case TARGET_NR_faccessat:
8675         if (!(p = lock_user_string(arg2))) {
8676             return -TARGET_EFAULT;
8677         }
8678         ret = get_errno(faccessat(arg1, p, arg3, 0));
8679         unlock_user(p, arg2, 0);
8680         return ret;
8681 #endif
8682 #ifdef TARGET_NR_nice /* not on alpha */
8683     case TARGET_NR_nice:
8684         return get_errno(nice(arg1));
8685 #endif
8686     case TARGET_NR_sync:
8687         sync();
8688         return 0;
8689 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8690     case TARGET_NR_syncfs:
8691         return get_errno(syncfs(arg1));
8692 #endif
8693     case TARGET_NR_kill:
8694         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8695 #ifdef TARGET_NR_rename
8696     case TARGET_NR_rename:
8697         {
8698             void *p2;
8699             p = lock_user_string(arg1);
8700             p2 = lock_user_string(arg2);
8701             if (!p || !p2)
8702                 ret = -TARGET_EFAULT;
8703             else
8704                 ret = get_errno(rename(p, p2));
8705             unlock_user(p2, arg2, 0);
8706             unlock_user(p, arg1, 0);
8707         }
8708         return ret;
8709 #endif
8710 #if defined(TARGET_NR_renameat)
8711     case TARGET_NR_renameat:
8712         {
8713             void *p2;
8714             p  = lock_user_string(arg2);
8715             p2 = lock_user_string(arg4);
8716             if (!p || !p2)
8717                 ret = -TARGET_EFAULT;
8718             else
8719                 ret = get_errno(renameat(arg1, p, arg3, p2));
8720             unlock_user(p2, arg4, 0);
8721             unlock_user(p, arg2, 0);
8722         }
8723         return ret;
8724 #endif
8725 #if defined(TARGET_NR_renameat2)
8726     case TARGET_NR_renameat2:
8727         {
8728             void *p2;
8729             p  = lock_user_string(arg2);
8730             p2 = lock_user_string(arg4);
8731             if (!p || !p2) {
8732                 ret = -TARGET_EFAULT;
8733             } else {
8734                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8735             }
8736             unlock_user(p2, arg4, 0);
8737             unlock_user(p, arg2, 0);
8738         }
8739         return ret;
8740 #endif
8741 #ifdef TARGET_NR_mkdir
8742     case TARGET_NR_mkdir:
8743         if (!(p = lock_user_string(arg1)))
8744             return -TARGET_EFAULT;
8745         ret = get_errno(mkdir(p, arg2));
8746         unlock_user(p, arg1, 0);
8747         return ret;
8748 #endif
8749 #if defined(TARGET_NR_mkdirat)
8750     case TARGET_NR_mkdirat:
8751         if (!(p = lock_user_string(arg2)))
8752             return -TARGET_EFAULT;
8753         ret = get_errno(mkdirat(arg1, p, arg3));
8754         unlock_user(p, arg2, 0);
8755         return ret;
8756 #endif
8757 #ifdef TARGET_NR_rmdir
8758     case TARGET_NR_rmdir:
8759         if (!(p = lock_user_string(arg1)))
8760             return -TARGET_EFAULT;
8761         ret = get_errno(rmdir(p));
8762         unlock_user(p, arg1, 0);
8763         return ret;
8764 #endif
8765     case TARGET_NR_dup:
8766         ret = get_errno(dup(arg1));
8767         if (ret >= 0) {
8768             fd_trans_dup(arg1, ret);
8769         }
8770         return ret;
8771 #ifdef TARGET_NR_pipe
8772     case TARGET_NR_pipe:
8773         return do_pipe(cpu_env, arg1, 0, 0);
8774 #endif
8775 #ifdef TARGET_NR_pipe2
8776     case TARGET_NR_pipe2:
8777         return do_pipe(cpu_env, arg1,
8778                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8779 #endif
8780     case TARGET_NR_times:
8781         {
8782             struct target_tms *tmsp;
8783             struct tms tms;
8784             ret = get_errno(times(&tms));
8785             if (arg1) {
8786                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8787                 if (!tmsp)
8788                     return -TARGET_EFAULT;
8789                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8790                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8791                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8792                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8793             }
8794             if (!is_error(ret))
8795                 ret = host_to_target_clock_t(ret);
8796         }
8797         return ret;
8798     case TARGET_NR_acct:
8799         if (arg1 == 0) {
8800             ret = get_errno(acct(NULL));
8801         } else {
8802             if (!(p = lock_user_string(arg1))) {
8803                 return -TARGET_EFAULT;
8804             }
8805             ret = get_errno(acct(path(p)));
8806             unlock_user(p, arg1, 0);
8807         }
8808         return ret;
8809 #ifdef TARGET_NR_umount2
8810     case TARGET_NR_umount2:
8811         if (!(p = lock_user_string(arg1)))
8812             return -TARGET_EFAULT;
8813         ret = get_errno(umount2(p, arg2));
8814         unlock_user(p, arg1, 0);
8815         return ret;
8816 #endif
8817     case TARGET_NR_ioctl:
8818         return do_ioctl(arg1, arg2, arg3);
8819 #ifdef TARGET_NR_fcntl
8820     case TARGET_NR_fcntl:
8821         return do_fcntl(arg1, arg2, arg3);
8822 #endif
8823     case TARGET_NR_setpgid:
8824         return get_errno(setpgid(arg1, arg2));
8825     case TARGET_NR_umask:
8826         return get_errno(umask(arg1));
8827     case TARGET_NR_chroot:
8828         if (!(p = lock_user_string(arg1)))
8829             return -TARGET_EFAULT;
8830         ret = get_errno(chroot(p));
8831         unlock_user(p, arg1, 0);
8832         return ret;
8833 #ifdef TARGET_NR_dup2
8834     case TARGET_NR_dup2:
8835         ret = get_errno(dup2(arg1, arg2));
8836         if (ret >= 0) {
8837             fd_trans_dup(arg1, arg2);
8838         }
8839         return ret;
8840 #endif
8841 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8842     case TARGET_NR_dup3:
8843     {
8844         int host_flags;
8845 
8846         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8847             return -EINVAL;
8848         }
8849         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8850         ret = get_errno(dup3(arg1, arg2, host_flags));
8851         if (ret >= 0) {
8852             fd_trans_dup(arg1, arg2);
8853         }
8854         return ret;
8855     }
8856 #endif
8857 #ifdef TARGET_NR_getppid /* not on alpha */
8858     case TARGET_NR_getppid:
8859         return get_errno(getppid());
8860 #endif
8861 #ifdef TARGET_NR_getpgrp
8862     case TARGET_NR_getpgrp:
8863         return get_errno(getpgrp());
8864 #endif
8865     case TARGET_NR_setsid:
8866         return get_errno(setsid());
8867 #ifdef TARGET_NR_sigaction
8868     case TARGET_NR_sigaction:
8869         {
8870 #if defined(TARGET_MIPS)
8871 	    struct target_sigaction act, oact, *pact, *old_act;
8872 
8873 	    if (arg2) {
8874                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8875                     return -TARGET_EFAULT;
8876 		act._sa_handler = old_act->_sa_handler;
8877 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8878 		act.sa_flags = old_act->sa_flags;
8879 		unlock_user_struct(old_act, arg2, 0);
8880 		pact = &act;
8881 	    } else {
8882 		pact = NULL;
8883 	    }
8884 
8885         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8886 
8887 	    if (!is_error(ret) && arg3) {
8888                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8889                     return -TARGET_EFAULT;
8890 		old_act->_sa_handler = oact._sa_handler;
8891 		old_act->sa_flags = oact.sa_flags;
8892 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8893 		old_act->sa_mask.sig[1] = 0;
8894 		old_act->sa_mask.sig[2] = 0;
8895 		old_act->sa_mask.sig[3] = 0;
8896 		unlock_user_struct(old_act, arg3, 1);
8897 	    }
8898 #else
8899             struct target_old_sigaction *old_act;
8900             struct target_sigaction act, oact, *pact;
8901             if (arg2) {
8902                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8903                     return -TARGET_EFAULT;
8904                 act._sa_handler = old_act->_sa_handler;
8905                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8906                 act.sa_flags = old_act->sa_flags;
8907 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8908                 act.sa_restorer = old_act->sa_restorer;
8909 #endif
8910                 unlock_user_struct(old_act, arg2, 0);
8911                 pact = &act;
8912             } else {
8913                 pact = NULL;
8914             }
8915             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8916             if (!is_error(ret) && arg3) {
8917                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8918                     return -TARGET_EFAULT;
8919                 old_act->_sa_handler = oact._sa_handler;
8920                 old_act->sa_mask = oact.sa_mask.sig[0];
8921                 old_act->sa_flags = oact.sa_flags;
8922 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8923                 old_act->sa_restorer = oact.sa_restorer;
8924 #endif
8925                 unlock_user_struct(old_act, arg3, 1);
8926             }
8927 #endif
8928         }
8929         return ret;
8930 #endif
8931     case TARGET_NR_rt_sigaction:
8932         {
8933             /*
8934              * For Alpha and SPARC this is a 5 argument syscall, with
8935              * a 'restorer' parameter which must be copied into the
8936              * sa_restorer field of the sigaction struct.
8937              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8938              * and arg5 is the sigsetsize.
8939              */
8940 #if defined(TARGET_ALPHA)
8941             target_ulong sigsetsize = arg4;
8942             target_ulong restorer = arg5;
8943 #elif defined(TARGET_SPARC)
8944             target_ulong restorer = arg4;
8945             target_ulong sigsetsize = arg5;
8946 #else
8947             target_ulong sigsetsize = arg4;
8948             target_ulong restorer = 0;
8949 #endif
8950             struct target_sigaction *act = NULL;
8951             struct target_sigaction *oact = NULL;
8952 
8953             if (sigsetsize != sizeof(target_sigset_t)) {
8954                 return -TARGET_EINVAL;
8955             }
8956             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8957                 return -TARGET_EFAULT;
8958             }
8959             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8960                 ret = -TARGET_EFAULT;
8961             } else {
8962                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8963                 if (oact) {
8964                     unlock_user_struct(oact, arg3, 1);
8965                 }
8966             }
8967             if (act) {
8968                 unlock_user_struct(act, arg2, 0);
8969             }
8970         }
8971         return ret;
8972 #ifdef TARGET_NR_sgetmask /* not on alpha */
8973     case TARGET_NR_sgetmask:
8974         {
8975             sigset_t cur_set;
8976             abi_ulong target_set;
8977             ret = do_sigprocmask(0, NULL, &cur_set);
8978             if (!ret) {
8979                 host_to_target_old_sigset(&target_set, &cur_set);
8980                 ret = target_set;
8981             }
8982         }
8983         return ret;
8984 #endif
8985 #ifdef TARGET_NR_ssetmask /* not on alpha */
8986     case TARGET_NR_ssetmask:
8987         {
8988             sigset_t set, oset;
8989             abi_ulong target_set = arg1;
8990             target_to_host_old_sigset(&set, &target_set);
8991             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8992             if (!ret) {
8993                 host_to_target_old_sigset(&target_set, &oset);
8994                 ret = target_set;
8995             }
8996         }
8997         return ret;
8998 #endif
8999 #ifdef TARGET_NR_sigprocmask
9000     case TARGET_NR_sigprocmask:
9001         {
9002 #if defined(TARGET_ALPHA)
9003             sigset_t set, oldset;
9004             abi_ulong mask;
9005             int how;
9006 
9007             switch (arg1) {
9008             case TARGET_SIG_BLOCK:
9009                 how = SIG_BLOCK;
9010                 break;
9011             case TARGET_SIG_UNBLOCK:
9012                 how = SIG_UNBLOCK;
9013                 break;
9014             case TARGET_SIG_SETMASK:
9015                 how = SIG_SETMASK;
9016                 break;
9017             default:
9018                 return -TARGET_EINVAL;
9019             }
9020             mask = arg2;
9021             target_to_host_old_sigset(&set, &mask);
9022 
9023             ret = do_sigprocmask(how, &set, &oldset);
9024             if (!is_error(ret)) {
9025                 host_to_target_old_sigset(&mask, &oldset);
9026                 ret = mask;
9027                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9028             }
9029 #else
9030             sigset_t set, oldset, *set_ptr;
9031             int how;
9032 
9033             if (arg2) {
9034                 switch (arg1) {
9035                 case TARGET_SIG_BLOCK:
9036                     how = SIG_BLOCK;
9037                     break;
9038                 case TARGET_SIG_UNBLOCK:
9039                     how = SIG_UNBLOCK;
9040                     break;
9041                 case TARGET_SIG_SETMASK:
9042                     how = SIG_SETMASK;
9043                     break;
9044                 default:
9045                     return -TARGET_EINVAL;
9046                 }
9047                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9048                     return -TARGET_EFAULT;
9049                 target_to_host_old_sigset(&set, p);
9050                 unlock_user(p, arg2, 0);
9051                 set_ptr = &set;
9052             } else {
9053                 how = 0;
9054                 set_ptr = NULL;
9055             }
9056             ret = do_sigprocmask(how, set_ptr, &oldset);
9057             if (!is_error(ret) && arg3) {
9058                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9059                     return -TARGET_EFAULT;
9060                 host_to_target_old_sigset(p, &oldset);
9061                 unlock_user(p, arg3, sizeof(target_sigset_t));
9062             }
9063 #endif
9064         }
9065         return ret;
9066 #endif
9067     case TARGET_NR_rt_sigprocmask:
9068         {
9069             int how = arg1;
9070             sigset_t set, oldset, *set_ptr;
9071 
9072             if (arg4 != sizeof(target_sigset_t)) {
9073                 return -TARGET_EINVAL;
9074             }
9075 
9076             if (arg2) {
9077                 switch(how) {
9078                 case TARGET_SIG_BLOCK:
9079                     how = SIG_BLOCK;
9080                     break;
9081                 case TARGET_SIG_UNBLOCK:
9082                     how = SIG_UNBLOCK;
9083                     break;
9084                 case TARGET_SIG_SETMASK:
9085                     how = SIG_SETMASK;
9086                     break;
9087                 default:
9088                     return -TARGET_EINVAL;
9089                 }
9090                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9091                     return -TARGET_EFAULT;
9092                 target_to_host_sigset(&set, p);
9093                 unlock_user(p, arg2, 0);
9094                 set_ptr = &set;
9095             } else {
9096                 how = 0;
9097                 set_ptr = NULL;
9098             }
9099             ret = do_sigprocmask(how, set_ptr, &oldset);
9100             if (!is_error(ret) && arg3) {
9101                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9102                     return -TARGET_EFAULT;
9103                 host_to_target_sigset(p, &oldset);
9104                 unlock_user(p, arg3, sizeof(target_sigset_t));
9105             }
9106         }
9107         return ret;
9108 #ifdef TARGET_NR_sigpending
9109     case TARGET_NR_sigpending:
9110         {
9111             sigset_t set;
9112             ret = get_errno(sigpending(&set));
9113             if (!is_error(ret)) {
9114                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9115                     return -TARGET_EFAULT;
9116                 host_to_target_old_sigset(p, &set);
9117                 unlock_user(p, arg1, sizeof(target_sigset_t));
9118             }
9119         }
9120         return ret;
9121 #endif
9122     case TARGET_NR_rt_sigpending:
9123         {
9124             sigset_t set;
9125 
9126             /* Yes, this check is >, not != like most. We follow the kernel's
9127              * logic here: it implements NR_sigpending through the same
9128              * code path, and in that case the old_sigset_t is smaller
9129              * in size.
9130              */
9131             if (arg2 > sizeof(target_sigset_t)) {
9132                 return -TARGET_EINVAL;
9133             }
9134 
9135             ret = get_errno(sigpending(&set));
9136             if (!is_error(ret)) {
9137                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9138                     return -TARGET_EFAULT;
9139                 host_to_target_sigset(p, &set);
9140                 unlock_user(p, arg1, sizeof(target_sigset_t));
9141             }
9142         }
9143         return ret;
9144 #ifdef TARGET_NR_sigsuspend
9145     case TARGET_NR_sigsuspend:
9146         {
9147             TaskState *ts = cpu->opaque;
9148 #if defined(TARGET_ALPHA)
9149             abi_ulong mask = arg1;
9150             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9151 #else
9152             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9153                 return -TARGET_EFAULT;
9154             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9155             unlock_user(p, arg1, 0);
9156 #endif
9157             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9158                                                SIGSET_T_SIZE));
9159             if (ret != -TARGET_ERESTARTSYS) {
9160                 ts->in_sigsuspend = 1;
9161             }
9162         }
9163         return ret;
9164 #endif
9165     case TARGET_NR_rt_sigsuspend:
9166         {
9167             TaskState *ts = cpu->opaque;
9168 
9169             if (arg2 != sizeof(target_sigset_t)) {
9170                 return -TARGET_EINVAL;
9171             }
9172             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9173                 return -TARGET_EFAULT;
9174             target_to_host_sigset(&ts->sigsuspend_mask, p);
9175             unlock_user(p, arg1, 0);
9176             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9177                                                SIGSET_T_SIZE));
9178             if (ret != -TARGET_ERESTARTSYS) {
9179                 ts->in_sigsuspend = 1;
9180             }
9181         }
9182         return ret;
9183 #ifdef TARGET_NR_rt_sigtimedwait
9184     case TARGET_NR_rt_sigtimedwait:
9185         {
9186             sigset_t set;
9187             struct timespec uts, *puts;
9188             siginfo_t uinfo;
9189 
9190             if (arg4 != sizeof(target_sigset_t)) {
9191                 return -TARGET_EINVAL;
9192             }
9193 
9194             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9195                 return -TARGET_EFAULT;
9196             target_to_host_sigset(&set, p);
9197             unlock_user(p, arg1, 0);
9198             if (arg3) {
9199                 puts = &uts;
9200                 if (target_to_host_timespec(puts, arg3)) {
9201                     return -TARGET_EFAULT;
9202                 }
9203             } else {
9204                 puts = NULL;
9205             }
9206             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9207                                                  SIGSET_T_SIZE));
9208             if (!is_error(ret)) {
9209                 if (arg2) {
9210                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9211                                   0);
9212                     if (!p) {
9213                         return -TARGET_EFAULT;
9214                     }
9215                     host_to_target_siginfo(p, &uinfo);
9216                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9217                 }
9218                 ret = host_to_target_signal(ret);
9219             }
9220         }
9221         return ret;
9222 #endif
9223 #ifdef TARGET_NR_rt_sigtimedwait_time64
9224     case TARGET_NR_rt_sigtimedwait_time64:
9225         {
9226             sigset_t set;
9227             struct timespec uts, *puts;
9228             siginfo_t uinfo;
9229 
9230             if (arg4 != sizeof(target_sigset_t)) {
9231                 return -TARGET_EINVAL;
9232             }
9233 
9234             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9235             if (!p) {
9236                 return -TARGET_EFAULT;
9237             }
9238             target_to_host_sigset(&set, p);
9239             unlock_user(p, arg1, 0);
9240             if (arg3) {
9241                 puts = &uts;
9242                 if (target_to_host_timespec64(puts, arg3)) {
9243                     return -TARGET_EFAULT;
9244                 }
9245             } else {
9246                 puts = NULL;
9247             }
9248             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9249                                                  SIGSET_T_SIZE));
9250             if (!is_error(ret)) {
9251                 if (arg2) {
9252                     p = lock_user(VERIFY_WRITE, arg2,
9253                                   sizeof(target_siginfo_t), 0);
9254                     if (!p) {
9255                         return -TARGET_EFAULT;
9256                     }
9257                     host_to_target_siginfo(p, &uinfo);
9258                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9259                 }
9260                 ret = host_to_target_signal(ret);
9261             }
9262         }
9263         return ret;
9264 #endif
9265     case TARGET_NR_rt_sigqueueinfo:
9266         {
9267             siginfo_t uinfo;
9268 
9269             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9270             if (!p) {
9271                 return -TARGET_EFAULT;
9272             }
9273             target_to_host_siginfo(&uinfo, p);
9274             unlock_user(p, arg3, 0);
9275             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9276         }
9277         return ret;
9278     case TARGET_NR_rt_tgsigqueueinfo:
9279         {
9280             siginfo_t uinfo;
9281 
9282             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9283             if (!p) {
9284                 return -TARGET_EFAULT;
9285             }
9286             target_to_host_siginfo(&uinfo, p);
9287             unlock_user(p, arg4, 0);
9288             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9289         }
9290         return ret;
9291 #ifdef TARGET_NR_sigreturn
9292     case TARGET_NR_sigreturn:
9293         if (block_signals()) {
9294             return -TARGET_ERESTARTSYS;
9295         }
9296         return do_sigreturn(cpu_env);
9297 #endif
9298     case TARGET_NR_rt_sigreturn:
9299         if (block_signals()) {
9300             return -TARGET_ERESTARTSYS;
9301         }
9302         return do_rt_sigreturn(cpu_env);
9303     case TARGET_NR_sethostname:
9304         if (!(p = lock_user_string(arg1)))
9305             return -TARGET_EFAULT;
9306         ret = get_errno(sethostname(p, arg2));
9307         unlock_user(p, arg1, 0);
9308         return ret;
9309 #ifdef TARGET_NR_setrlimit
9310     case TARGET_NR_setrlimit:
9311         {
9312             int resource = target_to_host_resource(arg1);
9313             struct target_rlimit *target_rlim;
9314             struct rlimit rlim;
9315             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9316                 return -TARGET_EFAULT;
9317             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9318             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9319             unlock_user_struct(target_rlim, arg2, 0);
9320             /*
9321              * If we just passed through resource limit settings for memory then
9322              * they would also apply to QEMU's own allocations, and QEMU will
9323              * crash or hang or die if its allocations fail. Ideally we would
9324              * track the guest allocations in QEMU and apply the limits ourselves.
9325              * For now, just tell the guest the call succeeded but don't actually
9326              * limit anything.
9327              */
9328             if (resource != RLIMIT_AS &&
9329                 resource != RLIMIT_DATA &&
9330                 resource != RLIMIT_STACK) {
9331                 return get_errno(setrlimit(resource, &rlim));
9332             } else {
9333                 return 0;
9334             }
9335         }
9336 #endif
9337 #ifdef TARGET_NR_getrlimit
9338     case TARGET_NR_getrlimit:
9339         {
9340             int resource = target_to_host_resource(arg1);
9341             struct target_rlimit *target_rlim;
9342             struct rlimit rlim;
9343 
9344             ret = get_errno(getrlimit(resource, &rlim));
9345             if (!is_error(ret)) {
9346                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9347                     return -TARGET_EFAULT;
9348                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9349                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9350                 unlock_user_struct(target_rlim, arg2, 1);
9351             }
9352         }
9353         return ret;
9354 #endif
9355     case TARGET_NR_getrusage:
9356         {
9357             struct rusage rusage;
9358             ret = get_errno(getrusage(arg1, &rusage));
9359             if (!is_error(ret)) {
9360                 ret = host_to_target_rusage(arg2, &rusage);
9361             }
9362         }
9363         return ret;
9364 #if defined(TARGET_NR_gettimeofday)
9365     case TARGET_NR_gettimeofday:
9366         {
9367             struct timeval tv;
9368             struct timezone tz;
9369 
9370             ret = get_errno(gettimeofday(&tv, &tz));
9371             if (!is_error(ret)) {
9372                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9373                     return -TARGET_EFAULT;
9374                 }
9375                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9376                     return -TARGET_EFAULT;
9377                 }
9378             }
9379         }
9380         return ret;
9381 #endif
9382 #if defined(TARGET_NR_settimeofday)
9383     case TARGET_NR_settimeofday:
9384         {
9385             struct timeval tv, *ptv = NULL;
9386             struct timezone tz, *ptz = NULL;
9387 
9388             if (arg1) {
9389                 if (copy_from_user_timeval(&tv, arg1)) {
9390                     return -TARGET_EFAULT;
9391                 }
9392                 ptv = &tv;
9393             }
9394 
9395             if (arg2) {
9396                 if (copy_from_user_timezone(&tz, arg2)) {
9397                     return -TARGET_EFAULT;
9398                 }
9399                 ptz = &tz;
9400             }
9401 
9402             return get_errno(settimeofday(ptv, ptz));
9403         }
9404 #endif
9405 #if defined(TARGET_NR_select)
9406     case TARGET_NR_select:
9407 #if defined(TARGET_WANT_NI_OLD_SELECT)
9408         /* Some architectures used to have old_select here
9409          * but now return ENOSYS for it.
9410          */
9411         ret = -TARGET_ENOSYS;
9412 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9413         ret = do_old_select(arg1);
9414 #else
9415         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9416 #endif
9417         return ret;
9418 #endif
9419 #ifdef TARGET_NR_pselect6
9420     case TARGET_NR_pselect6:
9421         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9422 #endif
9423 #ifdef TARGET_NR_pselect6_time64
9424     case TARGET_NR_pselect6_time64:
9425         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9426 #endif
9427 #ifdef TARGET_NR_symlink
9428     case TARGET_NR_symlink:
9429         {
9430             void *p2;
9431             p = lock_user_string(arg1);
9432             p2 = lock_user_string(arg2);
9433             if (!p || !p2)
9434                 ret = -TARGET_EFAULT;
9435             else
9436                 ret = get_errno(symlink(p, p2));
9437             unlock_user(p2, arg2, 0);
9438             unlock_user(p, arg1, 0);
9439         }
9440         return ret;
9441 #endif
9442 #if defined(TARGET_NR_symlinkat)
9443     case TARGET_NR_symlinkat:
9444         {
9445             void *p2;
9446             p  = lock_user_string(arg1);
9447             p2 = lock_user_string(arg3);
9448             if (!p || !p2)
9449                 ret = -TARGET_EFAULT;
9450             else
9451                 ret = get_errno(symlinkat(p, arg2, p2));
9452             unlock_user(p2, arg3, 0);
9453             unlock_user(p, arg1, 0);
9454         }
9455         return ret;
9456 #endif
9457 #ifdef TARGET_NR_readlink
9458     case TARGET_NR_readlink:
9459         {
9460             void *p2;
9461             p = lock_user_string(arg1);
9462             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9463             if (!p || !p2) {
9464                 ret = -TARGET_EFAULT;
9465             } else if (!arg3) {
9466                 /* Short circuit this for the magic exe check. */
9467                 ret = -TARGET_EINVAL;
9468             } else if (is_proc_myself((const char *)p, "exe")) {
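                /* readlink on /proc/self/exe (or /proc/<pid>/exe for our own
                 * pid) must report the guest binary, not the QEMU executable
                 * that is actually running.
                 */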
9469                 char real[PATH_MAX], *temp;
9470                 temp = realpath(exec_path, real);
9471                 /* Return value is # of bytes that we wrote to the buffer. */
9472                 if (temp == NULL) {
9473                     ret = get_errno(-1);
9474                 } else {
9475                     /* Don't worry about sign mismatch as earlier mapping
9476                      * logic would have thrown a bad address error. */
9477                     ret = MIN(strlen(real), arg3);
9478                     /* We cannot NUL terminate the string. */
9479                     memcpy(p2, real, ret);
9480                 }
9481             } else {
9482                 ret = get_errno(readlink(path(p), p2, arg3));
9483             }
9484             unlock_user(p2, arg2, ret);
9485             unlock_user(p, arg1, 0);
9486         }
9487         return ret;
9488 #endif
9489 #if defined(TARGET_NR_readlinkat)
9490     case TARGET_NR_readlinkat:
9491         {
9492             void *p2;
9493             p  = lock_user_string(arg2);
9494             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9495             if (!p || !p2) {
9496                 ret = -TARGET_EFAULT;
9497             } else if (is_proc_myself((const char *)p, "exe")) {
9498                 char real[PATH_MAX], *temp;
9499                 temp = realpath(exec_path, real);
9500             ret = temp == NULL ? get_errno(-1) : strlen(real);
9501                 snprintf((char *)p2, arg4, "%s", real);
9502             } else {
9503                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9504             }
9505             unlock_user(p2, arg3, ret);
9506             unlock_user(p, arg2, 0);
9507         }
9508         return ret;
9509 #endif
9510 #ifdef TARGET_NR_swapon
9511     case TARGET_NR_swapon:
9512         if (!(p = lock_user_string(arg1)))
9513             return -TARGET_EFAULT;
9514         ret = get_errno(swapon(p, arg2));
9515         unlock_user(p, arg1, 0);
9516         return ret;
9517 #endif
9518     case TARGET_NR_reboot:
9519         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9520            /* arg4 (the command string) is only used for RESTART2; it must be ignored otherwise. */
9521            p = lock_user_string(arg4);
9522            if (!p) {
9523                return -TARGET_EFAULT;
9524            }
9525            ret = get_errno(reboot(arg1, arg2, arg3, p));
9526            unlock_user(p, arg4, 0);
9527         } else {
9528            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9529         }
9530         return ret;
9531 #ifdef TARGET_NR_mmap
9532     case TARGET_NR_mmap:
9533 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9534     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9535     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9536     || defined(TARGET_S390X)
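        /* These targets pass the old mmap arguments as a single pointer
         * to a six-element vector in guest memory rather than in registers.
         */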
9537         {
9538             abi_ulong *v;
9539             abi_ulong v1, v2, v3, v4, v5, v6;
9540             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9541                 return -TARGET_EFAULT;
9542             v1 = tswapal(v[0]);
9543             v2 = tswapal(v[1]);
9544             v3 = tswapal(v[2]);
9545             v4 = tswapal(v[3]);
9546             v5 = tswapal(v[4]);
9547             v6 = tswapal(v[5]);
9548             unlock_user(v, arg1, 0);
9549             ret = get_errno(target_mmap(v1, v2, v3,
9550                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9551                                         v5, v6));
9552         }
9553 #else
9554         /* mmap pointers are always untagged */
9555         ret = get_errno(target_mmap(arg1, arg2, arg3,
9556                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9557                                     arg5,
9558                                     arg6));
9559 #endif
9560         return ret;
9561 #endif
9562 #ifdef TARGET_NR_mmap2
9563     case TARGET_NR_mmap2:
9564 #ifndef MMAP_SHIFT
9565 #define MMAP_SHIFT 12
9566 #endif
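        /* The mmap2 offset is given in units of (1 << MMAP_SHIFT) byte
         * pages (4096 by default), so convert it to a byte offset here.
         */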
9567         ret = target_mmap(arg1, arg2, arg3,
9568                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9569                           arg5, arg6 << MMAP_SHIFT);
9570         return get_errno(ret);
9571 #endif
9572     case TARGET_NR_munmap:
9573         arg1 = cpu_untagged_addr(cpu, arg1);
9574         return get_errno(target_munmap(arg1, arg2));
9575     case TARGET_NR_mprotect:
9576         arg1 = cpu_untagged_addr(cpu, arg1);
9577         {
9578             TaskState *ts = cpu->opaque;
9579             /* Special hack to detect libc making the stack executable.  */
9580             if ((arg3 & PROT_GROWSDOWN)
9581                 && arg1 >= ts->info->stack_limit
9582                 && arg1 <= ts->info->start_stack) {
9583                 arg3 &= ~PROT_GROWSDOWN;
9584                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9585                 arg1 = ts->info->stack_limit;
9586             }
9587         }
9588         return get_errno(target_mprotect(arg1, arg2, arg3));
9589 #ifdef TARGET_NR_mremap
9590     case TARGET_NR_mremap:
9591         arg1 = cpu_untagged_addr(cpu, arg1);
9592         /* mremap new_addr (arg5) is always untagged */
9593         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9594 #endif
9595         /* ??? msync/mlock/munlock are broken for softmmu.  */
9596 #ifdef TARGET_NR_msync
9597     case TARGET_NR_msync:
9598         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9599 #endif
9600 #ifdef TARGET_NR_mlock
9601     case TARGET_NR_mlock:
9602         return get_errno(mlock(g2h(cpu, arg1), arg2));
9603 #endif
9604 #ifdef TARGET_NR_munlock
9605     case TARGET_NR_munlock:
9606         return get_errno(munlock(g2h(cpu, arg1), arg2));
9607 #endif
9608 #ifdef TARGET_NR_mlockall
9609     case TARGET_NR_mlockall:
9610         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9611 #endif
9612 #ifdef TARGET_NR_munlockall
9613     case TARGET_NR_munlockall:
9614         return get_errno(munlockall());
9615 #endif
9616 #ifdef TARGET_NR_truncate
9617     case TARGET_NR_truncate:
9618         if (!(p = lock_user_string(arg1)))
9619             return -TARGET_EFAULT;
9620         ret = get_errno(truncate(p, arg2));
9621         unlock_user(p, arg1, 0);
9622         return ret;
9623 #endif
9624 #ifdef TARGET_NR_ftruncate
9625     case TARGET_NR_ftruncate:
9626         return get_errno(ftruncate(arg1, arg2));
9627 #endif
9628     case TARGET_NR_fchmod:
9629         return get_errno(fchmod(arg1, arg2));
9630 #if defined(TARGET_NR_fchmodat)
9631     case TARGET_NR_fchmodat:
9632         if (!(p = lock_user_string(arg2)))
9633             return -TARGET_EFAULT;
9634         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9635         unlock_user(p, arg2, 0);
9636         return ret;
9637 #endif
9638     case TARGET_NR_getpriority:
9639         /* Note that negative values are valid for getpriority, so we must
9640            differentiate based on errno settings.  */
9641         errno = 0;
9642         ret = getpriority(arg1, arg2);
9643         if (ret == -1 && errno != 0) {
9644             return -host_to_target_errno(errno);
9645         }
9646 #ifdef TARGET_ALPHA
9647         /* Return value is the unbiased priority.  Signal no error.  */
9648         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9649 #else
9650         /* Return value is a biased priority to avoid negative numbers.  */
9651         ret = 20 - ret;
9652 #endif
9653         return ret;
9654     case TARGET_NR_setpriority:
9655         return get_errno(setpriority(arg1, arg2, arg3));
9656 #ifdef TARGET_NR_statfs
9657     case TARGET_NR_statfs:
9658         if (!(p = lock_user_string(arg1))) {
9659             return -TARGET_EFAULT;
9660         }
9661         ret = get_errno(statfs(path(p), &stfs));
9662         unlock_user(p, arg1, 0);
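    /* The fstatfs case jumps here to share the statfs conversion below. */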
9663     convert_statfs:
9664         if (!is_error(ret)) {
9665             struct target_statfs *target_stfs;
9666 
9667             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9668                 return -TARGET_EFAULT;
9669             __put_user(stfs.f_type, &target_stfs->f_type);
9670             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9671             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9672             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9673             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9674             __put_user(stfs.f_files, &target_stfs->f_files);
9675             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9676             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9677             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9678             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9679             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9680 #ifdef _STATFS_F_FLAGS
9681             __put_user(stfs.f_flags, &target_stfs->f_flags);
9682 #else
9683             __put_user(0, &target_stfs->f_flags);
9684 #endif
9685             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9686             unlock_user_struct(target_stfs, arg2, 1);
9687         }
9688         return ret;
9689 #endif
9690 #ifdef TARGET_NR_fstatfs
9691     case TARGET_NR_fstatfs:
9692         ret = get_errno(fstatfs(arg1, &stfs));
9693         goto convert_statfs;
9694 #endif
9695 #ifdef TARGET_NR_statfs64
9696     case TARGET_NR_statfs64:
9697         if (!(p = lock_user_string(arg1))) {
9698             return -TARGET_EFAULT;
9699         }
9700         ret = get_errno(statfs(path(p), &stfs));
9701         unlock_user(p, arg1, 0);
9702     convert_statfs64:
9703         if (!is_error(ret)) {
9704             struct target_statfs64 *target_stfs;
9705 
9706             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9707                 return -TARGET_EFAULT;
9708             __put_user(stfs.f_type, &target_stfs->f_type);
9709             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9710             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9711             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9712             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9713             __put_user(stfs.f_files, &target_stfs->f_files);
9714             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9715             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9716             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9717             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9718             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9719 #ifdef _STATFS_F_FLAGS
9720             __put_user(stfs.f_flags, &target_stfs->f_flags);
9721 #else
9722             __put_user(0, &target_stfs->f_flags);
9723 #endif
9724             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9725             unlock_user_struct(target_stfs, arg3, 1);
9726         }
9727         return ret;
9728     case TARGET_NR_fstatfs64:
9729         ret = get_errno(fstatfs(arg1, &stfs));
9730         goto convert_statfs64;
9731 #endif
9732 #ifdef TARGET_NR_socketcall
9733     case TARGET_NR_socketcall:
9734         return do_socketcall(arg1, arg2);
9735 #endif
9736 #ifdef TARGET_NR_accept
9737     case TARGET_NR_accept:
9738         return do_accept4(arg1, arg2, arg3, 0);
9739 #endif
9740 #ifdef TARGET_NR_accept4
9741     case TARGET_NR_accept4:
9742         return do_accept4(arg1, arg2, arg3, arg4);
9743 #endif
9744 #ifdef TARGET_NR_bind
9745     case TARGET_NR_bind:
9746         return do_bind(arg1, arg2, arg3);
9747 #endif
9748 #ifdef TARGET_NR_connect
9749     case TARGET_NR_connect:
9750         return do_connect(arg1, arg2, arg3);
9751 #endif
9752 #ifdef TARGET_NR_getpeername
9753     case TARGET_NR_getpeername:
9754         return do_getpeername(arg1, arg2, arg3);
9755 #endif
9756 #ifdef TARGET_NR_getsockname
9757     case TARGET_NR_getsockname:
9758         return do_getsockname(arg1, arg2, arg3);
9759 #endif
9760 #ifdef TARGET_NR_getsockopt
9761     case TARGET_NR_getsockopt:
9762         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9763 #endif
9764 #ifdef TARGET_NR_listen
9765     case TARGET_NR_listen:
9766         return get_errno(listen(arg1, arg2));
9767 #endif
9768 #ifdef TARGET_NR_recv
9769     case TARGET_NR_recv:
9770         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9771 #endif
9772 #ifdef TARGET_NR_recvfrom
9773     case TARGET_NR_recvfrom:
9774         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9775 #endif
9776 #ifdef TARGET_NR_recvmsg
9777     case TARGET_NR_recvmsg:
9778         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9779 #endif
9780 #ifdef TARGET_NR_send
9781     case TARGET_NR_send:
9782         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9783 #endif
9784 #ifdef TARGET_NR_sendmsg
9785     case TARGET_NR_sendmsg:
9786         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9787 #endif
9788 #ifdef TARGET_NR_sendmmsg
9789     case TARGET_NR_sendmmsg:
9790         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9791 #endif
9792 #ifdef TARGET_NR_recvmmsg
9793     case TARGET_NR_recvmmsg:
9794         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9795 #endif
9796 #ifdef TARGET_NR_sendto
9797     case TARGET_NR_sendto:
9798         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9799 #endif
9800 #ifdef TARGET_NR_shutdown
9801     case TARGET_NR_shutdown:
9802         return get_errno(shutdown(arg1, arg2));
9803 #endif
9804 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9805     case TARGET_NR_getrandom:
9806         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9807         if (!p) {
9808             return -TARGET_EFAULT;
9809         }
9810         ret = get_errno(getrandom(p, arg2, arg3));
9811         unlock_user(p, arg1, ret);
9812         return ret;
9813 #endif
9814 #ifdef TARGET_NR_socket
9815     case TARGET_NR_socket:
9816         return do_socket(arg1, arg2, arg3);
9817 #endif
9818 #ifdef TARGET_NR_socketpair
9819     case TARGET_NR_socketpair:
9820         return do_socketpair(arg1, arg2, arg3, arg4);
9821 #endif
9822 #ifdef TARGET_NR_setsockopt
9823     case TARGET_NR_setsockopt:
9824         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9825 #endif
9826 #if defined(TARGET_NR_syslog)
9827     case TARGET_NR_syslog:
9828         {
9829             int len = arg3;
9830 
9831             switch (arg1) {
9832             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9833             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9834             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9835             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9836             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9837             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9838             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9839             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9840                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9841             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9842             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9843             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9844                 {
9845                     if (len < 0) {
9846                         return -TARGET_EINVAL;
9847                     }
9848                     if (len == 0) {
9849                         return 0;
9850                     }
9851                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9852                     if (!p) {
9853                         return -TARGET_EFAULT;
9854                     }
9855                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9856                     unlock_user(p, arg2, arg3);
9857                 }
9858                 return ret;
9859             default:
9860                 return -TARGET_EINVAL;
9861             }
9862         }
9863         break;
9864 #endif
9865     case TARGET_NR_setitimer:
9866         {
9867             struct itimerval value, ovalue, *pvalue;
9868 
9869             if (arg2) {
9870                 pvalue = &value;
9871                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9872                     || copy_from_user_timeval(&pvalue->it_value,
9873                                               arg2 + sizeof(struct target_timeval)))
9874                     return -TARGET_EFAULT;
9875             } else {
9876                 pvalue = NULL;
9877             }
9878             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9879             if (!is_error(ret) && arg3) {
9880                 if (copy_to_user_timeval(arg3,
9881                                          &ovalue.it_interval)
9882                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9883                                             &ovalue.it_value))
9884                     return -TARGET_EFAULT;
9885             }
9886         }
9887         return ret;
9888     case TARGET_NR_getitimer:
9889         {
9890             struct itimerval value;
9891 
9892             ret = get_errno(getitimer(arg1, &value));
9893             if (!is_error(ret) && arg2) {
9894                 if (copy_to_user_timeval(arg2,
9895                                          &value.it_interval)
9896                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9897                                             &value.it_value))
9898                     return -TARGET_EFAULT;
9899             }
9900         }
9901         return ret;
9902 #ifdef TARGET_NR_stat
9903     case TARGET_NR_stat:
9904         if (!(p = lock_user_string(arg1))) {
9905             return -TARGET_EFAULT;
9906         }
9907         ret = get_errno(stat(path(p), &st));
9908         unlock_user(p, arg1, 0);
9909         goto do_stat;
9910 #endif
9911 #ifdef TARGET_NR_lstat
9912     case TARGET_NR_lstat:
9913         if (!(p = lock_user_string(arg1))) {
9914             return -TARGET_EFAULT;
9915         }
9916         ret = get_errno(lstat(path(p), &st));
9917         unlock_user(p, arg1, 0);
9918         goto do_stat;
9919 #endif
9920 #ifdef TARGET_NR_fstat
9921     case TARGET_NR_fstat:
9922         {
9923             ret = get_errno(fstat(arg1, &st));
9924 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
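        /* stat and lstat jump here to share the struct stat conversion. */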
9925         do_stat:
9926 #endif
9927             if (!is_error(ret)) {
9928                 struct target_stat *target_st;
9929 
9930                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9931                     return -TARGET_EFAULT;
9932                 memset(target_st, 0, sizeof(*target_st));
9933                 __put_user(st.st_dev, &target_st->st_dev);
9934                 __put_user(st.st_ino, &target_st->st_ino);
9935                 __put_user(st.st_mode, &target_st->st_mode);
9936                 __put_user(st.st_uid, &target_st->st_uid);
9937                 __put_user(st.st_gid, &target_st->st_gid);
9938                 __put_user(st.st_nlink, &target_st->st_nlink);
9939                 __put_user(st.st_rdev, &target_st->st_rdev);
9940                 __put_user(st.st_size, &target_st->st_size);
9941                 __put_user(st.st_blksize, &target_st->st_blksize);
9942                 __put_user(st.st_blocks, &target_st->st_blocks);
9943                 __put_user(st.st_atime, &target_st->target_st_atime);
9944                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9945                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9946 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9947                 __put_user(st.st_atim.tv_nsec,
9948                            &target_st->target_st_atime_nsec);
9949                 __put_user(st.st_mtim.tv_nsec,
9950                            &target_st->target_st_mtime_nsec);
9951                 __put_user(st.st_ctim.tv_nsec,
9952                            &target_st->target_st_ctime_nsec);
9953 #endif
9954                 unlock_user_struct(target_st, arg2, 1);
9955             }
9956         }
9957         return ret;
9958 #endif
9959     case TARGET_NR_vhangup:
9960         return get_errno(vhangup());
9961 #ifdef TARGET_NR_syscall
9962     case TARGET_NR_syscall:
9963         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9964                           arg6, arg7, arg8, 0);
9965 #endif
9966 #if defined(TARGET_NR_wait4)
9967     case TARGET_NR_wait4:
9968         {
9969             int status;
9970             abi_long status_ptr = arg2;
9971             struct rusage rusage, *rusage_ptr;
9972             abi_ulong target_rusage = arg4;
9973             abi_long rusage_err;
9974             if (target_rusage)
9975                 rusage_ptr = &rusage;
9976             else
9977                 rusage_ptr = NULL;
9978             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9979             if (!is_error(ret)) {
9980                 if (status_ptr && ret) {
9981                     status = host_to_target_waitstatus(status);
9982                     if (put_user_s32(status, status_ptr))
9983                         return -TARGET_EFAULT;
9984                 }
9985                 if (target_rusage) {
9986                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9987                     if (rusage_err) {
9988                         ret = rusage_err;
9989                     }
9990                 }
9991             }
9992         }
9993         return ret;
9994 #endif
9995 #ifdef TARGET_NR_swapoff
9996     case TARGET_NR_swapoff:
9997         if (!(p = lock_user_string(arg1)))
9998             return -TARGET_EFAULT;
9999         ret = get_errno(swapoff(p));
10000         unlock_user(p, arg1, 0);
10001         return ret;
10002 #endif
10003     case TARGET_NR_sysinfo:
10004         {
10005             struct target_sysinfo *target_value;
10006             struct sysinfo value;
10007             ret = get_errno(sysinfo(&value));
10008             if (!is_error(ret) && arg1)
10009             {
10010                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10011                     return -TARGET_EFAULT;
10012                 __put_user(value.uptime, &target_value->uptime);
10013                 __put_user(value.loads[0], &target_value->loads[0]);
10014                 __put_user(value.loads[1], &target_value->loads[1]);
10015                 __put_user(value.loads[2], &target_value->loads[2]);
10016                 __put_user(value.totalram, &target_value->totalram);
10017                 __put_user(value.freeram, &target_value->freeram);
10018                 __put_user(value.sharedram, &target_value->sharedram);
10019                 __put_user(value.bufferram, &target_value->bufferram);
10020                 __put_user(value.totalswap, &target_value->totalswap);
10021                 __put_user(value.freeswap, &target_value->freeswap);
10022                 __put_user(value.procs, &target_value->procs);
10023                 __put_user(value.totalhigh, &target_value->totalhigh);
10024                 __put_user(value.freehigh, &target_value->freehigh);
10025                 __put_user(value.mem_unit, &target_value->mem_unit);
10026                 unlock_user_struct(target_value, arg1, 1);
10027             }
10028         }
10029         return ret;
10030 #ifdef TARGET_NR_ipc
10031     case TARGET_NR_ipc:
10032         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10033 #endif
10034 #ifdef TARGET_NR_semget
10035     case TARGET_NR_semget:
10036         return get_errno(semget(arg1, arg2, arg3));
10037 #endif
10038 #ifdef TARGET_NR_semop
10039     case TARGET_NR_semop:
10040         return do_semtimedop(arg1, arg2, arg3, 0, false);
10041 #endif
10042 #ifdef TARGET_NR_semtimedop
10043     case TARGET_NR_semtimedop:
10044         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10045 #endif
10046 #ifdef TARGET_NR_semtimedop_time64
10047     case TARGET_NR_semtimedop_time64:
10048         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10049 #endif
10050 #ifdef TARGET_NR_semctl
10051     case TARGET_NR_semctl:
10052         return do_semctl(arg1, arg2, arg3, arg4);
10053 #endif
10054 #ifdef TARGET_NR_msgctl
10055     case TARGET_NR_msgctl:
10056         return do_msgctl(arg1, arg2, arg3);
10057 #endif
10058 #ifdef TARGET_NR_msgget
10059     case TARGET_NR_msgget:
10060         return get_errno(msgget(arg1, arg2));
10061 #endif
10062 #ifdef TARGET_NR_msgrcv
10063     case TARGET_NR_msgrcv:
10064         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10065 #endif
10066 #ifdef TARGET_NR_msgsnd
10067     case TARGET_NR_msgsnd:
10068         return do_msgsnd(arg1, arg2, arg3, arg4);
10069 #endif
10070 #ifdef TARGET_NR_shmget
10071     case TARGET_NR_shmget:
10072         return get_errno(shmget(arg1, arg2, arg3));
10073 #endif
10074 #ifdef TARGET_NR_shmctl
10075     case TARGET_NR_shmctl:
10076         return do_shmctl(arg1, arg2, arg3);
10077 #endif
10078 #ifdef TARGET_NR_shmat
10079     case TARGET_NR_shmat:
10080         return do_shmat(cpu_env, arg1, arg2, arg3);
10081 #endif
10082 #ifdef TARGET_NR_shmdt
10083     case TARGET_NR_shmdt:
10084         return do_shmdt(arg1);
10085 #endif
10086     case TARGET_NR_fsync:
10087         return get_errno(fsync(arg1));
10088     case TARGET_NR_clone:
10089         /* Linux manages to have three different orderings for its
10090          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10091          * match the kernel's CONFIG_CLONE_* settings.
10092          * Microblaze is further special in that it uses a sixth
10093          * implicit argument to clone for the TLS pointer.
10094          */
10095 #if defined(TARGET_MICROBLAZE)
10096         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10097 #elif defined(TARGET_CLONE_BACKWARDS)
10098         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10099 #elif defined(TARGET_CLONE_BACKWARDS2)
10100         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10101 #else
10102         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10103 #endif
10104         return ret;
10105 #ifdef __NR_exit_group
10106         /* new thread calls */
10107     case TARGET_NR_exit_group:
10108         preexit_cleanup(cpu_env, arg1);
10109         return get_errno(exit_group(arg1));
10110 #endif
10111     case TARGET_NR_setdomainname:
10112         if (!(p = lock_user_string(arg1)))
10113             return -TARGET_EFAULT;
10114         ret = get_errno(setdomainname(p, arg2));
10115         unlock_user(p, arg1, 0);
10116         return ret;
10117     case TARGET_NR_uname:
10118         /* no need to transcode because we use the linux syscall */
10119         {
10120             struct new_utsname * buf;
10121 
10122             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10123                 return -TARGET_EFAULT;
10124             ret = get_errno(sys_uname(buf));
10125             if (!is_error(ret)) {
10126                 /* Overwrite the native machine name with whatever is being
10127                    emulated. */
10128                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10129                           sizeof(buf->machine));
10130                 /* Allow the user to override the reported release.  */
10131                 if (qemu_uname_release && *qemu_uname_release) {
10132                     g_strlcpy(buf->release, qemu_uname_release,
10133                               sizeof(buf->release));
10134                 }
10135             }
10136             unlock_user_struct(buf, arg1, 1);
10137         }
10138         return ret;
10139 #ifdef TARGET_I386
10140     case TARGET_NR_modify_ldt:
10141         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10142 #if !defined(TARGET_X86_64)
10143     case TARGET_NR_vm86:
10144         return do_vm86(cpu_env, arg1, arg2);
10145 #endif
10146 #endif
10147 #if defined(TARGET_NR_adjtimex)
10148     case TARGET_NR_adjtimex:
10149         {
10150             struct timex host_buf;
10151 
10152             if (target_to_host_timex(&host_buf, arg1) != 0) {
10153                 return -TARGET_EFAULT;
10154             }
10155             ret = get_errno(adjtimex(&host_buf));
10156             if (!is_error(ret)) {
10157                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10158                     return -TARGET_EFAULT;
10159                 }
10160             }
10161         }
10162         return ret;
10163 #endif
10164 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10165     case TARGET_NR_clock_adjtime:
10166         {
10167             struct timex htx, *phtx = &htx;
10168 
10169             if (target_to_host_timex(phtx, arg2) != 0) {
10170                 return -TARGET_EFAULT;
10171             }
10172             ret = get_errno(clock_adjtime(arg1, phtx));
10173             if (!is_error(ret) && phtx) {
10174                 if (host_to_target_timex(arg2, phtx) != 0) {
10175                     return -TARGET_EFAULT;
10176                 }
10177             }
10178         }
10179         return ret;
10180 #endif
10181 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10182     case TARGET_NR_clock_adjtime64:
10183         {
10184             struct timex htx;
10185 
10186             if (target_to_host_timex64(&htx, arg2) != 0) {
10187                 return -TARGET_EFAULT;
10188             }
10189             ret = get_errno(clock_adjtime(arg1, &htx));
10190             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10191                     return -TARGET_EFAULT;
10192             }
10193         }
10194         return ret;
10195 #endif
10196     case TARGET_NR_getpgid:
10197         return get_errno(getpgid(arg1));
10198     case TARGET_NR_fchdir:
10199         return get_errno(fchdir(arg1));
10200     case TARGET_NR_personality:
10201         return get_errno(personality(arg1));
10202 #ifdef TARGET_NR__llseek /* Not on alpha */
10203     case TARGET_NR__llseek:
10204         {
10205             int64_t res;
10206 #if !defined(__NR_llseek)
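            /* Without a host llseek syscall, assemble the 64-bit offset from
             * the two 32-bit halves and use lseek() directly.
             */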
10207             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10208             if (res == -1) {
10209                 ret = get_errno(res);
10210             } else {
10211                 ret = 0;
10212             }
10213 #else
10214             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10215 #endif
10216             if ((ret == 0) && put_user_s64(res, arg4)) {
10217                 return -TARGET_EFAULT;
10218             }
10219         }
10220         return ret;
10221 #endif
10222 #ifdef TARGET_NR_getdents
10223     case TARGET_NR_getdents:
10224 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10225 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
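        /* The 64-bit host linux_dirent layout differs from the 32-bit
         * target_dirent, so read into a temporary host buffer and convert
         * each record into the guest buffer.
         */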
10226         {
10227             struct target_dirent *target_dirp;
10228             struct linux_dirent *dirp;
10229             abi_long count = arg3;
10230 
10231             dirp = g_try_malloc(count);
10232             if (!dirp) {
10233                 return -TARGET_ENOMEM;
10234             }
10235 
10236             ret = get_errno(sys_getdents(arg1, dirp, count));
10237             if (!is_error(ret)) {
10238                 struct linux_dirent *de;
10239                 struct target_dirent *tde;
10240                 int len = ret;
10241                 int reclen, treclen;
10242                 int count1, tnamelen;
10243 
10244                 count1 = 0;
10245                 de = dirp;
10246                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10247                     return -TARGET_EFAULT;
10248                 tde = target_dirp;
10249                 while (len > 0) {
10250                     reclen = de->d_reclen;
10251                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10252                     assert(tnamelen >= 0);
10253                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10254                     assert(count1 + treclen <= count);
10255                     tde->d_reclen = tswap16(treclen);
10256                     tde->d_ino = tswapal(de->d_ino);
10257                     tde->d_off = tswapal(de->d_off);
10258                     memcpy(tde->d_name, de->d_name, tnamelen);
10259                     de = (struct linux_dirent *)((char *)de + reclen);
10260                     len -= reclen;
10261                     tde = (struct target_dirent *)((char *)tde + treclen);
10262                     count1 += treclen;
10263                 }
10264                 ret = count1;
10265                 unlock_user(target_dirp, arg2, ret);
10266             }
10267             g_free(dirp);
10268         }
10269 #else
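        /* Host and target dirent layouts match here, so byteswap the
         * records in place in the guest buffer.
         */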
10270         {
10271             struct linux_dirent *dirp;
10272             abi_long count = arg3;
10273 
10274             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10275                 return -TARGET_EFAULT;
10276             ret = get_errno(sys_getdents(arg1, dirp, count));
10277             if (!is_error(ret)) {
10278                 struct linux_dirent *de;
10279                 int len = ret;
10280                 int reclen;
10281                 de = dirp;
10282                 while (len > 0) {
10283                     reclen = de->d_reclen;
10284                     if (reclen > len)
10285                         break;
10286                     de->d_reclen = tswap16(reclen);
10287                     tswapls(&de->d_ino);
10288                     tswapls(&de->d_off);
10289                     de = (struct linux_dirent *)((char *)de + reclen);
10290                     len -= reclen;
10291                 }
10292             }
10293             unlock_user(dirp, arg2, ret);
10294         }
10295 #endif
10296 #else
10297         /* Implement getdents in terms of getdents64 */
10298         {
10299             struct linux_dirent64 *dirp;
10300             abi_long count = arg3;
10301 
10302             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10303             if (!dirp) {
10304                 return -TARGET_EFAULT;
10305             }
10306             ret = get_errno(sys_getdents64(arg1, dirp, count));
10307             if (!is_error(ret)) {
10308                 /* Convert the dirent64 structs to target dirent.  We do this
10309                  * in-place, since we can guarantee that a target_dirent is no
10310                  * larger than a dirent64; however this means we have to be
10311                  * careful to read everything before writing in the new format.
10312                  */
10313                 struct linux_dirent64 *de;
10314                 struct target_dirent *tde;
10315                 int len = ret;
10316                 int tlen = 0;
10317 
10318                 de = dirp;
10319                 tde = (struct target_dirent *)dirp;
10320                 while (len > 0) {
10321                     int namelen, treclen;
10322                     int reclen = de->d_reclen;
10323                     uint64_t ino = de->d_ino;
10324                     int64_t off = de->d_off;
10325                     uint8_t type = de->d_type;
10326 
10327                     namelen = strlen(de->d_name);
10328                     treclen = offsetof(struct target_dirent, d_name)
10329                         + namelen + 2;
10330                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10331 
10332                     memmove(tde->d_name, de->d_name, namelen + 1);
10333                     tde->d_ino = tswapal(ino);
10334                     tde->d_off = tswapal(off);
10335                     tde->d_reclen = tswap16(treclen);
10336                     /* The target_dirent type is in what was formerly a padding
10337                      * byte at the end of the structure:
10338                      */
10339                     *(((char *)tde) + treclen - 1) = type;
10340 
10341                     de = (struct linux_dirent64 *)((char *)de + reclen);
10342                     tde = (struct target_dirent *)((char *)tde + treclen);
10343                     len -= reclen;
10344                     tlen += treclen;
10345                 }
10346                 ret = tlen;
10347             }
10348             unlock_user(dirp, arg2, ret);
10349         }
10350 #endif
10351         return ret;
10352 #endif /* TARGET_NR_getdents */
10353 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10354     case TARGET_NR_getdents64:
10355         {
10356             struct linux_dirent64 *dirp;
10357             abi_long count = arg3;
10358             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10359                 return -TARGET_EFAULT;
10360             ret = get_errno(sys_getdents64(arg1, dirp, count));
10361             if (!is_error(ret)) {
10362                 struct linux_dirent64 *de;
10363                 int len = ret;
10364                 int reclen;
10365                 de = dirp;
10366                 while (len > 0) {
10367                     reclen = de->d_reclen;
10368                     if (reclen > len)
10369                         break;
10370                     de->d_reclen = tswap16(reclen);
10371                     tswap64s((uint64_t *)&de->d_ino);
10372                     tswap64s((uint64_t *)&de->d_off);
10373                     de = (struct linux_dirent64 *)((char *)de + reclen);
10374                     len -= reclen;
10375                 }
10376             }
10377             unlock_user(dirp, arg2, ret);
10378         }
10379         return ret;
10380 #endif /* TARGET_NR_getdents64 */
10381 #if defined(TARGET_NR__newselect)
10382     case TARGET_NR__newselect:
10383         return do_select(arg1, arg2, arg3, arg4, arg5);
10384 #endif
10385 #ifdef TARGET_NR_poll
10386     case TARGET_NR_poll:
10387         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10388 #endif
10389 #ifdef TARGET_NR_ppoll
10390     case TARGET_NR_ppoll:
10391         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10392 #endif
10393 #ifdef TARGET_NR_ppoll_time64
10394     case TARGET_NR_ppoll_time64:
10395         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10396 #endif
10397     case TARGET_NR_flock:
10398         /* NOTE: the flock constant seems to be the same for every
10399            Linux platform */
10400         return get_errno(safe_flock(arg1, arg2));
10401     case TARGET_NR_readv:
10402         {
10403             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10404             if (vec != NULL) {
10405                 ret = get_errno(safe_readv(arg1, vec, arg3));
10406                 unlock_iovec(vec, arg2, arg3, 1);
10407             } else {
10408                 ret = -host_to_target_errno(errno);
10409             }
10410         }
10411         return ret;
10412     case TARGET_NR_writev:
10413         {
10414             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10415             if (vec != NULL) {
10416                 ret = get_errno(safe_writev(arg1, vec, arg3));
10417                 unlock_iovec(vec, arg2, arg3, 0);
10418             } else {
10419                 ret = -host_to_target_errno(errno);
10420             }
10421         }
10422         return ret;
10423 #if defined(TARGET_NR_preadv)
10424     case TARGET_NR_preadv:
10425         {
10426             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10427             if (vec != NULL) {
10428                 unsigned long low, high;
10429 
10430                 target_to_host_low_high(arg4, arg5, &low, &high);
10431                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10432                 unlock_iovec(vec, arg2, arg3, 1);
10433             } else {
10434                 ret = -host_to_target_errno(errno);
10435             }
10436         }
10437         return ret;
10438 #endif
10439 #if defined(TARGET_NR_pwritev)
10440     case TARGET_NR_pwritev:
10441         {
10442             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10443             if (vec != NULL) {
10444                 unsigned long low, high;
10445 
10446                 target_to_host_low_high(arg4, arg5, &low, &high);
10447                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10448                 unlock_iovec(vec, arg2, arg3, 0);
10449             } else {
10450                 ret = -host_to_target_errno(errno);
10451             }
10452         }
10453         return ret;
10454 #endif
10455     case TARGET_NR_getsid:
10456         return get_errno(getsid(arg1));
10457 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10458     case TARGET_NR_fdatasync:
10459         return get_errno(fdatasync(arg1));
10460 #endif
10461     case TARGET_NR_sched_getaffinity:
10462         {
10463             unsigned int mask_size;
10464             unsigned long *mask;
10465 
10466             /*
10467              * sched_getaffinity needs multiples of ulong, so we need to take
10468              * care of mismatches between target ulong and host ulong sizes.
10469              */
10470             if (arg2 & (sizeof(abi_ulong) - 1)) {
10471                 return -TARGET_EINVAL;
10472             }
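            /* Round the guest-supplied size up to a whole number of host longs. */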
10473             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10474 
10475             mask = alloca(mask_size);
10476             memset(mask, 0, mask_size);
10477             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10478 
10479             if (!is_error(ret)) {
10480                 if (ret > arg2) {
10481                     /* More data returned than the caller's buffer will fit.
10482                      * This only happens if sizeof(abi_long) < sizeof(long)
10483                      * and the caller passed us a buffer holding an odd number
10484                      * of abi_longs. If the host kernel is actually using the
10485                      * extra 4 bytes then fail EINVAL; otherwise we can just
10486                      * ignore them and only copy the interesting part.
10487                      */
10488                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10489                     if (numcpus > arg2 * 8) {
10490                         return -TARGET_EINVAL;
10491                     }
10492                     ret = arg2;
10493                 }
10494 
10495                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10496                     return -TARGET_EFAULT;
10497                 }
10498             }
10499         }
10500         return ret;
10501     case TARGET_NR_sched_setaffinity:
10502         {
10503             unsigned int mask_size;
10504             unsigned long *mask;
10505 
10506             /*
10507              * sched_setaffinity needs multiples of ulong, so we need to take
10508              * care of mismatches between target ulong and host ulong sizes.
10509              */
10510             if (arg2 & (sizeof(abi_ulong) - 1)) {
10511                 return -TARGET_EINVAL;
10512             }
10513             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10514             mask = alloca(mask_size);
10515 
10516             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10517             if (ret) {
10518                 return ret;
10519             }
10520 
10521             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10522         }
10523     case TARGET_NR_getcpu:
10524         {
10525             unsigned cpu, node;
10526             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10527                                        arg2 ? &node : NULL,
10528                                        NULL));
10529             if (is_error(ret)) {
10530                 return ret;
10531             }
10532             if (arg1 && put_user_u32(cpu, arg1)) {
10533                 return -TARGET_EFAULT;
10534             }
10535             if (arg2 && put_user_u32(node, arg2)) {
10536                 return -TARGET_EFAULT;
10537             }
10538         }
10539         return ret;
10540     case TARGET_NR_sched_setparam:
10541         {
10542             struct sched_param *target_schp;
10543             struct sched_param schp;
10544 
10545             if (arg2 == 0) {
10546                 return -TARGET_EINVAL;
10547             }
10548             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10549                 return -TARGET_EFAULT;
10550             schp.sched_priority = tswap32(target_schp->sched_priority);
10551             unlock_user_struct(target_schp, arg2, 0);
10552             return get_errno(sched_setparam(arg1, &schp));
10553         }
10554     case TARGET_NR_sched_getparam:
10555         {
10556             struct sched_param *target_schp;
10557             struct sched_param schp;
10558 
10559             if (arg2 == 0) {
10560                 return -TARGET_EINVAL;
10561             }
10562             ret = get_errno(sched_getparam(arg1, &schp));
10563             if (!is_error(ret)) {
10564                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10565                     return -TARGET_EFAULT;
10566                 target_schp->sched_priority = tswap32(schp.sched_priority);
10567                 unlock_user_struct(target_schp, arg2, 1);
10568             }
10569         }
10570         return ret;
10571     case TARGET_NR_sched_setscheduler:
10572         {
10573             struct sched_param *target_schp;
10574             struct sched_param schp;
10575             if (arg3 == 0) {
10576                 return -TARGET_EINVAL;
10577             }
10578             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10579                 return -TARGET_EFAULT;
10580             schp.sched_priority = tswap32(target_schp->sched_priority);
10581             unlock_user_struct(target_schp, arg3, 0);
10582             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10583         }
10584     case TARGET_NR_sched_getscheduler:
10585         return get_errno(sched_getscheduler(arg1));
10586     case TARGET_NR_sched_yield:
10587         return get_errno(sched_yield());
10588     case TARGET_NR_sched_get_priority_max:
10589         return get_errno(sched_get_priority_max(arg1));
10590     case TARGET_NR_sched_get_priority_min:
10591         return get_errno(sched_get_priority_min(arg1));
10592 #ifdef TARGET_NR_sched_rr_get_interval
10593     case TARGET_NR_sched_rr_get_interval:
10594         {
10595             struct timespec ts;
10596             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10597             if (!is_error(ret)) {
10598                 ret = host_to_target_timespec(arg2, &ts);
10599             }
10600         }
10601         return ret;
10602 #endif
10603 #ifdef TARGET_NR_sched_rr_get_interval_time64
10604     case TARGET_NR_sched_rr_get_interval_time64:
10605         {
10606             struct timespec ts;
10607             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10608             if (!is_error(ret)) {
10609                 ret = host_to_target_timespec64(arg2, &ts);
10610             }
10611         }
10612         return ret;
10613 #endif
10614 #if defined(TARGET_NR_nanosleep)
10615     case TARGET_NR_nanosleep:
10616         {
10617             struct timespec req, rem;
10618             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10619             ret = get_errno(safe_nanosleep(&req, &rem));
10620             if (is_error(ret) && arg2) {
10621                 host_to_target_timespec(arg2, &rem);
10622             }
10623         }
10624         return ret;
10625 #endif
10626     case TARGET_NR_prctl:
10627         switch (arg1) {
10628         case PR_GET_PDEATHSIG:
10629         {
10630             int deathsig;
10631             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10632             if (!is_error(ret) && arg2
10633                 && put_user_s32(deathsig, arg2)) {
10634                 return -TARGET_EFAULT;
10635             }
10636             return ret;
10637         }
10638 #ifdef PR_GET_NAME
10639         case PR_GET_NAME:
10640         {
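                  /*
                   * The task comm name is a fixed 16-byte buffer
                   * (TASK_COMM_LEN, NUL terminator included), hence the
                   * constant length used below.
                   */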
10641             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10642             if (!name) {
10643                 return -TARGET_EFAULT;
10644             }
10645             ret = get_errno(prctl(arg1, (unsigned long)name,
10646                                   arg3, arg4, arg5));
10647             unlock_user(name, arg2, 16);
10648             return ret;
10649         }
10650         case PR_SET_NAME:
10651         {
10652             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10653             if (!name) {
10654                 return -TARGET_EFAULT;
10655             }
10656             ret = get_errno(prctl(arg1, (unsigned long)name,
10657                                   arg3, arg4, arg5));
10658             unlock_user(name, arg2, 0);
10659             return ret;
10660         }
10661 #endif
10662 #ifdef TARGET_MIPS
10663         case TARGET_PR_GET_FP_MODE:
10664         {
10665             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10666             ret = 0;
10667             if (env->CP0_Status & (1 << CP0St_FR)) {
10668                 ret |= TARGET_PR_FP_MODE_FR;
10669             }
10670             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10671                 ret |= TARGET_PR_FP_MODE_FRE;
10672             }
10673             return ret;
10674         }
10675         case TARGET_PR_SET_FP_MODE:
10676         {
10677             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10678             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10679             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10680             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10681             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10682 
10683             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10684                                             TARGET_PR_FP_MODE_FRE;
10685 
10686             /* If nothing to change, return right away, successfully.  */
10687             if (old_fr == new_fr && old_fre == new_fre) {
10688                 return 0;
10689             }
10690             /* Check the value is valid */
10691             if (arg2 & ~known_bits) {
10692                 return -TARGET_EOPNOTSUPP;
10693             }
10694             /* Setting FRE without FR is not supported.  */
10695             if (new_fre && !new_fr) {
10696                 return -TARGET_EOPNOTSUPP;
10697             }
10698             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10699                 /* FR1 is not supported */
10700                 return -TARGET_EOPNOTSUPP;
10701             }
10702             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10703                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10704                 /* cannot set FR=0 */
10705                 return -TARGET_EOPNOTSUPP;
10706             }
10707             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10708                 /* Cannot set FRE=1 */
10709                 return -TARGET_EOPNOTSUPP;
10710             }
10711 
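                  /*
                   * Re-pack the FPU registers: with FR=0 the odd-numbered
                   * single-precision registers occupy their own entries,
                   * while with FR=1 they live in the upper half of the
                   * even-numbered 64-bit registers, so copy each odd
                   * register across in the appropriate direction.
                   */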
10712             int i;
10713             fpr_t *fpr = env->active_fpu.fpr;
10714             for (i = 0; i < 32 ; i += 2) {
10715                 if (!old_fr && new_fr) {
10716                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10717                 } else if (old_fr && !new_fr) {
10718                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10719                 }
10720             }
10721 
10722             if (new_fr) {
10723                 env->CP0_Status |= (1 << CP0St_FR);
10724                 env->hflags |= MIPS_HFLAG_F64;
10725             } else {
10726                 env->CP0_Status &= ~(1 << CP0St_FR);
10727                 env->hflags &= ~MIPS_HFLAG_F64;
10728             }
10729             if (new_fre) {
10730                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10731                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10732                     env->hflags |= MIPS_HFLAG_FRE;
10733                 }
10734             } else {
10735                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10736                 env->hflags &= ~MIPS_HFLAG_FRE;
10737             }
10738 
10739             return 0;
10740         }
10741 #endif /* MIPS */
10742 #ifdef TARGET_AARCH64
10743         case TARGET_PR_SVE_SET_VL:
10744             /*
10745              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10746              * PR_SVE_VL_INHERIT.  Note the kernel definition
10747              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10748              * even though the current architectural maximum is VQ=16.
10749              */
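                  /*
                   * The prctl value is a vector length in bytes; the code
                   * below works in quadwords (VQ = VL / 16) and stores
                   * VQ - 1 in ZCR_EL1.LEN, so e.g. a requested VL of 32
                   * bytes maps to VQ == 2 (clamped to the CPU's sve_max_vq).
                   */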
10750             ret = -TARGET_EINVAL;
10751             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10752                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10753                 CPUARMState *env = cpu_env;
10754                 ARMCPU *cpu = env_archcpu(env);
10755                 uint32_t vq, old_vq;
10756 
10757                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10758                 vq = MAX(arg2 / 16, 1);
10759                 vq = MIN(vq, cpu->sve_max_vq);
10760 
10761                 if (vq < old_vq) {
10762                     aarch64_sve_narrow_vq(env, vq);
10763                 }
10764                 env->vfp.zcr_el[1] = vq - 1;
10765                 arm_rebuild_hflags(env);
10766                 ret = vq * 16;
10767             }
10768             return ret;
10769         case TARGET_PR_SVE_GET_VL:
10770             ret = -TARGET_EINVAL;
10771             {
10772                 ARMCPU *cpu = env_archcpu(cpu_env);
10773                 if (cpu_isar_feature(aa64_sve, cpu)) {
10774                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10775                 }
10776             }
10777             return ret;
10778         case TARGET_PR_PAC_RESET_KEYS:
10779             {
10780                 CPUARMState *env = cpu_env;
10781                 ARMCPU *cpu = env_archcpu(env);
10782 
10783                 if (arg3 || arg4 || arg5) {
10784                     return -TARGET_EINVAL;
10785                 }
10786                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10787                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10788                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10789                                TARGET_PR_PAC_APGAKEY);
10790                     int ret = 0;
10791                     Error *err = NULL;
10792 
10793                     if (arg2 == 0) {
10794                         arg2 = all;
10795                     } else if (arg2 & ~all) {
10796                         return -TARGET_EINVAL;
10797                     }
10798                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10799                         ret |= qemu_guest_getrandom(&env->keys.apia,
10800                                                     sizeof(ARMPACKey), &err);
10801                     }
10802                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10803                         ret |= qemu_guest_getrandom(&env->keys.apib,
10804                                                     sizeof(ARMPACKey), &err);
10805                     }
10806                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10807                         ret |= qemu_guest_getrandom(&env->keys.apda,
10808                                                     sizeof(ARMPACKey), &err);
10809                     }
10810                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10811                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10812                                                     sizeof(ARMPACKey), &err);
10813                     }
10814                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10815                         ret |= qemu_guest_getrandom(&env->keys.apga,
10816                                                     sizeof(ARMPACKey), &err);
10817                     }
10818                     if (ret != 0) {
10819                         /*
10820                          * Some unknown failure in the crypto.  The best
10821                          * we can do is log it and fail the syscall.
10822                          * The real syscall cannot fail this way.
10823                          */
10824                         qemu_log_mask(LOG_UNIMP,
10825                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10826                                       error_get_pretty(err));
10827                         error_free(err);
10828                         return -TARGET_EIO;
10829                     }
10830                     return 0;
10831                 }
10832             }
10833             return -TARGET_EINVAL;
10834         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10835             {
10836                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10837                 CPUARMState *env = cpu_env;
10838                 ARMCPU *cpu = env_archcpu(env);
10839 
10840                 if (cpu_isar_feature(aa64_mte, cpu)) {
10841                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10842                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10843                 }
10844 
10845                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10846                     return -TARGET_EINVAL;
10847                 }
10848                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10849 
10850                 if (cpu_isar_feature(aa64_mte, cpu)) {
10851                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10852                     case TARGET_PR_MTE_TCF_NONE:
10853                     case TARGET_PR_MTE_TCF_SYNC:
10854                     case TARGET_PR_MTE_TCF_ASYNC:
10855                         break;
10856                     default:
10857                         return -TARGET_EINVAL;
10858                     }
10859 
10860                     /*
10861                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10862                      * Note that the syscall values are consistent with hw.
10863                      */
10864                     env->cp15.sctlr_el[1] =
10865                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10866                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
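                          /*
                           * E.g. TARGET_PR_MTE_TCF_SYNC shifts down to 1,
                           * which is also the SCTLR_EL1.TCF0 encoding for
                           * synchronous tag check faults.
                           */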
10867 
10868                     /*
10869                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10870                      * Note that the syscall uses an include mask,
10871                      * and hardware uses an exclude mask -- invert.
10872                      */
10873                     env->cp15.gcr_el1 =
10874                         deposit64(env->cp15.gcr_el1, 0, 16,
10875                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
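                          /*
                           * E.g. an include mask that only permits tag 0
                           * (bit 0 set) becomes GCR_EL1.Exclude == 0xfffe.
                           */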
10876                     arm_rebuild_hflags(env);
10877                 }
10878                 return 0;
10879             }
10880         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10881             {
10882                 abi_long ret = 0;
10883                 CPUARMState *env = cpu_env;
10884                 ARMCPU *cpu = env_archcpu(env);
10885 
10886                 if (arg2 || arg3 || arg4 || arg5) {
10887                     return -TARGET_EINVAL;
10888                 }
10889                 if (env->tagged_addr_enable) {
10890                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10891                 }
10892                 if (cpu_isar_feature(aa64_mte, cpu)) {
10893                     /* See above. */
10894                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10895                             << TARGET_PR_MTE_TCF_SHIFT);
10896                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10897                                     ~env->cp15.gcr_el1);
10898                 }
10899                 return ret;
10900             }
10901 #endif /* AARCH64 */
10902         case PR_GET_SECCOMP:
10903         case PR_SET_SECCOMP:
10904             /* Disable seccomp to prevent the target disabling syscalls we
10905              * need. */
10906             return -TARGET_EINVAL;
10907         default:
10908             /* Most prctl options have no pointer arguments */
10909             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10910         }
10911         break;
10912 #ifdef TARGET_NR_arch_prctl
10913     case TARGET_NR_arch_prctl:
10914         return do_arch_prctl(cpu_env, arg1, arg2);
10915 #endif
10916 #ifdef TARGET_NR_pread64
10917     case TARGET_NR_pread64:
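              /*
               * On 32-bit ABIs that require 64-bit syscall arguments to start
               * in an even register pair, a padding word occupies the slot
               * before the 64-bit offset, so its two halves arrive in
               * arg5/arg6 instead of arg4/arg5; shift them down here
               * (pwrite64 below does the same).
               */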
10918         if (regpairs_aligned(cpu_env, num)) {
10919             arg4 = arg5;
10920             arg5 = arg6;
10921         }
10922         if (arg2 == 0 && arg3 == 0) {
10923             /* Special-case NULL buffer and zero length, which should succeed */
10924             p = 0;
10925         } else {
10926             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10927             if (!p) {
10928                 return -TARGET_EFAULT;
10929             }
10930         }
10931         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10932         unlock_user(p, arg2, ret);
10933         return ret;
10934     case TARGET_NR_pwrite64:
10935         if (regpairs_aligned(cpu_env, num)) {
10936             arg4 = arg5;
10937             arg5 = arg6;
10938         }
10939         if (arg2 == 0 && arg3 == 0) {
10940             /* Special-case NULL buffer and zero length, which should succeed */
10941             p = 0;
10942         } else {
10943             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10944             if (!p) {
10945                 return -TARGET_EFAULT;
10946             }
10947         }
10948         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10949         unlock_user(p, arg2, 0);
10950         return ret;
10951 #endif
10952     case TARGET_NR_getcwd:
10953         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10954             return -TARGET_EFAULT;
10955         ret = get_errno(sys_getcwd1(p, arg2));
10956         unlock_user(p, arg1, ret);
10957         return ret;
10958     case TARGET_NR_capget:
10959     case TARGET_NR_capset:
10960     {
10961         struct target_user_cap_header *target_header;
10962         struct target_user_cap_data *target_data = NULL;
10963         struct __user_cap_header_struct header;
10964         struct __user_cap_data_struct data[2];
10965         struct __user_cap_data_struct *dataptr = NULL;
10966         int i, target_datalen;
10967         int data_items = 1;
10968 
10969         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10970             return -TARGET_EFAULT;
10971         }
10972         header.version = tswap32(target_header->version);
10973         header.pid = tswap32(target_header->pid);
10974 
10975         if (header.version != _LINUX_CAPABILITY_VERSION) {
10976             /* Version 2 and up takes pointer to two user_data structs */
10977             data_items = 2;
10978         }
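              /*
               * Background (not from this file): _LINUX_CAPABILITY_VERSION
               * here is the v1 ABI, which uses a single 32-bit capability
               * set; the v2/v3 ABIs widened the sets to 64 bits, hence two
               * user_data structs.
               */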
10979 
10980         target_datalen = sizeof(*target_data) * data_items;
10981 
10982         if (arg2) {
10983             if (num == TARGET_NR_capget) {
10984                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10985             } else {
10986                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10987             }
10988             if (!target_data) {
10989                 unlock_user_struct(target_header, arg1, 0);
10990                 return -TARGET_EFAULT;
10991             }
10992 
10993             if (num == TARGET_NR_capset) {
10994                 for (i = 0; i < data_items; i++) {
10995                     data[i].effective = tswap32(target_data[i].effective);
10996                     data[i].permitted = tswap32(target_data[i].permitted);
10997                     data[i].inheritable = tswap32(target_data[i].inheritable);
10998                 }
10999             }
11000 
11001             dataptr = data;
11002         }
11003 
11004         if (num == TARGET_NR_capget) {
11005             ret = get_errno(capget(&header, dataptr));
11006         } else {
11007             ret = get_errno(capset(&header, dataptr));
11008         }
11009 
11010         /* The kernel always updates version for both capget and capset */
11011         target_header->version = tswap32(header.version);
11012         unlock_user_struct(target_header, arg1, 1);
11013 
11014         if (arg2) {
11015             if (num == TARGET_NR_capget) {
11016                 for (i = 0; i < data_items; i++) {
11017                     target_data[i].effective = tswap32(data[i].effective);
11018                     target_data[i].permitted = tswap32(data[i].permitted);
11019                     target_data[i].inheritable = tswap32(data[i].inheritable);
11020                 }
11021                 unlock_user(target_data, arg2, target_datalen);
11022             } else {
11023                 unlock_user(target_data, arg2, 0);
11024             }
11025         }
11026         return ret;
11027     }
11028     case TARGET_NR_sigaltstack:
11029         return do_sigaltstack(arg1, arg2, cpu_env);
11030 
11031 #ifdef CONFIG_SENDFILE
11032 #ifdef TARGET_NR_sendfile
11033     case TARGET_NR_sendfile:
11034     {
11035         off_t *offp = NULL;
11036         off_t off;
11037         if (arg3) {
11038             ret = get_user_sal(off, arg3);
11039             if (is_error(ret)) {
11040                 return ret;
11041             }
11042             offp = &off;
11043         }
11044         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11045         if (!is_error(ret) && arg3) {
11046             abi_long ret2 = put_user_sal(off, arg3);
11047             if (is_error(ret2)) {
11048                 ret = ret2;
11049             }
11050         }
11051         return ret;
11052     }
11053 #endif
11054 #ifdef TARGET_NR_sendfile64
11055     case TARGET_NR_sendfile64:
11056     {
11057         off_t *offp = NULL;
11058         off_t off;
11059         if (arg3) {
11060             ret = get_user_s64(off, arg3);
11061             if (is_error(ret)) {
11062                 return ret;
11063             }
11064             offp = &off;
11065         }
11066         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11067         if (!is_error(ret) && arg3) {
11068             abi_long ret2 = put_user_s64(off, arg3);
11069             if (is_error(ret2)) {
11070                 ret = ret2;
11071             }
11072         }
11073         return ret;
11074     }
11075 #endif
11076 #endif
11077 #ifdef TARGET_NR_vfork
11078     case TARGET_NR_vfork:
11079         return get_errno(do_fork(cpu_env,
11080                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11081                          0, 0, 0, 0));
11082 #endif
11083 #ifdef TARGET_NR_ugetrlimit
11084     case TARGET_NR_ugetrlimit:
11085     {
11086         struct rlimit rlim;
11087         int resource = target_to_host_resource(arg1);
11088         ret = get_errno(getrlimit(resource, &rlim));
11089         if (!is_error(ret)) {
11090             struct target_rlimit *target_rlim;
11091             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11092                 return -TARGET_EFAULT;
11093             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11094             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11095             unlock_user_struct(target_rlim, arg2, 1);
11096         }
11097         return ret;
11098     }
11099 #endif
11100 #ifdef TARGET_NR_truncate64
11101     case TARGET_NR_truncate64:
11102         if (!(p = lock_user_string(arg1)))
11103             return -TARGET_EFAULT;
11104         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11105         unlock_user(p, arg1, 0);
11106         return ret;
11107 #endif
11108 #ifdef TARGET_NR_ftruncate64
11109     case TARGET_NR_ftruncate64:
11110         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11111 #endif
11112 #ifdef TARGET_NR_stat64
11113     case TARGET_NR_stat64:
11114         if (!(p = lock_user_string(arg1))) {
11115             return -TARGET_EFAULT;
11116         }
11117         ret = get_errno(stat(path(p), &st));
11118         unlock_user(p, arg1, 0);
11119         if (!is_error(ret))
11120             ret = host_to_target_stat64(cpu_env, arg2, &st);
11121         return ret;
11122 #endif
11123 #ifdef TARGET_NR_lstat64
11124     case TARGET_NR_lstat64:
11125         if (!(p = lock_user_string(arg1))) {
11126             return -TARGET_EFAULT;
11127         }
11128         ret = get_errno(lstat(path(p), &st));
11129         unlock_user(p, arg1, 0);
11130         if (!is_error(ret))
11131             ret = host_to_target_stat64(cpu_env, arg2, &st);
11132         return ret;
11133 #endif
11134 #ifdef TARGET_NR_fstat64
11135     case TARGET_NR_fstat64:
11136         ret = get_errno(fstat(arg1, &st));
11137         if (!is_error(ret))
11138             ret = host_to_target_stat64(cpu_env, arg2, &st);
11139         return ret;
11140 #endif
11141 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11142 #ifdef TARGET_NR_fstatat64
11143     case TARGET_NR_fstatat64:
11144 #endif
11145 #ifdef TARGET_NR_newfstatat
11146     case TARGET_NR_newfstatat:
11147 #endif
11148         if (!(p = lock_user_string(arg2))) {
11149             return -TARGET_EFAULT;
11150         }
11151         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11152         unlock_user(p, arg2, 0);
11153         if (!is_error(ret))
11154             ret = host_to_target_stat64(cpu_env, arg3, &st);
11155         return ret;
11156 #endif
11157 #if defined(TARGET_NR_statx)
11158     case TARGET_NR_statx:
11159         {
11160             struct target_statx *target_stx;
11161             int dirfd = arg1;
11162             int flags = arg3;
11163 
11164             p = lock_user_string(arg2);
11165             if (p == NULL) {
11166                 return -TARGET_EFAULT;
11167             }
11168 #if defined(__NR_statx)
11169             {
11170                 /*
11171                  * It is assumed that struct statx is architecture independent.
11172                  */
11173                 struct target_statx host_stx;
11174                 int mask = arg4;
11175 
11176                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11177                 if (!is_error(ret)) {
11178                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11179                         unlock_user(p, arg2, 0);
11180                         return -TARGET_EFAULT;
11181                     }
11182                 }
11183 
11184                 if (ret != -TARGET_ENOSYS) {
11185                     unlock_user(p, arg2, 0);
11186                     return ret;
11187                 }
11188             }
11189 #endif
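                  /*
                   * Either the host has no statx(2) at all or it returned
                   * ENOSYS: fall back to fstatat(2) and synthesize the
                   * target_statx from an ordinary struct stat below.
                   */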
11190             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11191             unlock_user(p, arg2, 0);
11192 
11193             if (!is_error(ret)) {
11194                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11195                     return -TARGET_EFAULT;
11196                 }
11197                 memset(target_stx, 0, sizeof(*target_stx));
11198                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11199                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11200                 __put_user(st.st_ino, &target_stx->stx_ino);
11201                 __put_user(st.st_mode, &target_stx->stx_mode);
11202                 __put_user(st.st_uid, &target_stx->stx_uid);
11203                 __put_user(st.st_gid, &target_stx->stx_gid);
11204                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11205                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11206                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11207                 __put_user(st.st_size, &target_stx->stx_size);
11208                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11209                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11210                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11211                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11212                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11213                 unlock_user_struct(target_stx, arg5, 1);
11214             }
11215         }
11216         return ret;
11217 #endif
11218 #ifdef TARGET_NR_lchown
11219     case TARGET_NR_lchown:
11220         if (!(p = lock_user_string(arg1)))
11221             return -TARGET_EFAULT;
11222         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11223         unlock_user(p, arg1, 0);
11224         return ret;
11225 #endif
11226 #ifdef TARGET_NR_getuid
11227     case TARGET_NR_getuid:
11228         return get_errno(high2lowuid(getuid()));
11229 #endif
11230 #ifdef TARGET_NR_getgid
11231     case TARGET_NR_getgid:
11232         return get_errno(high2lowgid(getgid()));
11233 #endif
11234 #ifdef TARGET_NR_geteuid
11235     case TARGET_NR_geteuid:
11236         return get_errno(high2lowuid(geteuid()));
11237 #endif
11238 #ifdef TARGET_NR_getegid
11239     case TARGET_NR_getegid:
11240         return get_errno(high2lowgid(getegid()));
11241 #endif
11242     case TARGET_NR_setreuid:
11243         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11244     case TARGET_NR_setregid:
11245         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11246     case TARGET_NR_getgroups:
11247         {
11248             int gidsetsize = arg1;
11249             target_id *target_grouplist;
11250             gid_t *grouplist;
11251             int i;
11252 
11253             grouplist = alloca(gidsetsize * sizeof(gid_t));
11254             ret = get_errno(getgroups(gidsetsize, grouplist));
11255             if (gidsetsize == 0)
11256                 return ret;
11257             if (!is_error(ret)) {
11258                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11259                 if (!target_grouplist)
11260                     return -TARGET_EFAULT;
11261                 for (i = 0; i < ret; i++)
11262                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11263                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11264             }
11265         }
11266         return ret;
11267     case TARGET_NR_setgroups:
11268         {
11269             int gidsetsize = arg1;
11270             target_id *target_grouplist;
11271             gid_t *grouplist = NULL;
11272             int i;
11273             if (gidsetsize) {
11274                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11275                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11276                 if (!target_grouplist) {
11277                     return -TARGET_EFAULT;
11278                 }
11279                 for (i = 0; i < gidsetsize; i++) {
11280                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11281                 }
11282                 unlock_user(target_grouplist, arg2, 0);
11283             }
11284             return get_errno(setgroups(gidsetsize, grouplist));
11285         }
11286     case TARGET_NR_fchown:
11287         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11288 #if defined(TARGET_NR_fchownat)
11289     case TARGET_NR_fchownat:
11290         if (!(p = lock_user_string(arg2)))
11291             return -TARGET_EFAULT;
11292         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11293                                  low2highgid(arg4), arg5));
11294         unlock_user(p, arg2, 0);
11295         return ret;
11296 #endif
11297 #ifdef TARGET_NR_setresuid
11298     case TARGET_NR_setresuid:
11299         return get_errno(sys_setresuid(low2highuid(arg1),
11300                                        low2highuid(arg2),
11301                                        low2highuid(arg3)));
11302 #endif
11303 #ifdef TARGET_NR_getresuid
11304     case TARGET_NR_getresuid:
11305         {
11306             uid_t ruid, euid, suid;
11307             ret = get_errno(getresuid(&ruid, &euid, &suid));
11308             if (!is_error(ret)) {
11309                 if (put_user_id(high2lowuid(ruid), arg1)
11310                     || put_user_id(high2lowuid(euid), arg2)
11311                     || put_user_id(high2lowuid(suid), arg3))
11312                     return -TARGET_EFAULT;
11313             }
11314         }
11315         return ret;
11316 #endif
11317 #ifdef TARGET_NR_setresgid
11318     case TARGET_NR_setresgid:
11319         return get_errno(sys_setresgid(low2highgid(arg1),
11320                                        low2highgid(arg2),
11321                                        low2highgid(arg3)));
11322 #endif
11323 #ifdef TARGET_NR_getresgid
11324     case TARGET_NR_getresgid:
11325         {
11326             gid_t rgid, egid, sgid;
11327             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11328             if (!is_error(ret)) {
11329                 if (put_user_id(high2lowgid(rgid), arg1)
11330                     || put_user_id(high2lowgid(egid), arg2)
11331                     || put_user_id(high2lowgid(sgid), arg3))
11332                     return -TARGET_EFAULT;
11333             }
11334         }
11335         return ret;
11336 #endif
11337 #ifdef TARGET_NR_chown
11338     case TARGET_NR_chown:
11339         if (!(p = lock_user_string(arg1)))
11340             return -TARGET_EFAULT;
11341         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11342         unlock_user(p, arg1, 0);
11343         return ret;
11344 #endif
11345     case TARGET_NR_setuid:
11346         return get_errno(sys_setuid(low2highuid(arg1)));
11347     case TARGET_NR_setgid:
11348         return get_errno(sys_setgid(low2highgid(arg1)));
11349     case TARGET_NR_setfsuid:
11350         return get_errno(setfsuid(arg1));
11351     case TARGET_NR_setfsgid:
11352         return get_errno(setfsgid(arg1));
11353 
11354 #ifdef TARGET_NR_lchown32
11355     case TARGET_NR_lchown32:
11356         if (!(p = lock_user_string(arg1)))
11357             return -TARGET_EFAULT;
11358         ret = get_errno(lchown(p, arg2, arg3));
11359         unlock_user(p, arg1, 0);
11360         return ret;
11361 #endif
11362 #ifdef TARGET_NR_getuid32
11363     case TARGET_NR_getuid32:
11364         return get_errno(getuid());
11365 #endif
11366 
11367 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11368    /* Alpha specific */
11369     case TARGET_NR_getxuid:
11370          {
11371             uid_t euid;
11372             euid = geteuid();
11373             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11374          }
11375         return get_errno(getuid());
11376 #endif
11377 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11378    /* Alpha specific */
11379     case TARGET_NR_getxgid:
11380          {
11381             gid_t egid;
11382             egid = getegid();
11383             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11384          }
11385         return get_errno(getgid());
11386 #endif
11387 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11388     /* Alpha specific */
11389     case TARGET_NR_osf_getsysinfo:
11390         ret = -TARGET_EOPNOTSUPP;
11391         switch (arg1) {
11392           case TARGET_GSI_IEEE_FP_CONTROL:
11393             {
11394                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11395                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11396 
11397                 swcr &= ~SWCR_STATUS_MASK;
11398                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11399 
11400                 if (put_user_u64 (swcr, arg2))
11401                         return -TARGET_EFAULT;
11402                 ret = 0;
11403             }
11404             break;
11405 
11406           /* case GSI_IEEE_STATE_AT_SIGNAL:
11407              -- Not implemented in linux kernel.
11408              case GSI_UACPROC:
11409              -- Retrieves current unaligned access state; not much used.
11410              case GSI_PROC_TYPE:
11411              -- Retrieves implver information; surely not used.
11412              case GSI_GET_HWRPB:
11413              -- Grabs a copy of the HWRPB; surely not used.
11414           */
11415         }
11416         return ret;
11417 #endif
11418 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11419     /* Alpha specific */
11420     case TARGET_NR_osf_setsysinfo:
11421         ret = -TARGET_EOPNOTSUPP;
11422         switch (arg1) {
11423           case TARGET_SSI_IEEE_FP_CONTROL:
11424             {
11425                 uint64_t swcr, fpcr;
11426 
11427                 if (get_user_u64 (swcr, arg2)) {
11428                     return -TARGET_EFAULT;
11429                 }
11430 
11431                 /*
11432                  * The kernel calls swcr_update_status to update the
11433                  * status bits from the fpcr at every point that it
11434                  * could be queried.  Therefore, we store the status
11435                  * bits only in FPCR.
11436                  */
11437                 ((CPUAlphaState *)cpu_env)->swcr
11438                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11439 
11440                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11441                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11442                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11443                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11444                 ret = 0;
11445             }
11446             break;
11447 
11448           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11449             {
11450                 uint64_t exc, fpcr, fex;
11451 
11452                 if (get_user_u64(exc, arg2)) {
11453                     return -TARGET_EFAULT;
11454                 }
11455                 exc &= SWCR_STATUS_MASK;
11456                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11457 
11458                 /* Old exceptions are not signaled.  */
11459                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11460                 fex = exc & ~fex;
11461                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11462                 fex &= ((CPUArchState *)cpu_env)->swcr;
11463 
11464                 /* Update the hardware fpcr.  */
11465                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11466                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11467 
11468                 if (fex) {
11469                     int si_code = TARGET_FPE_FLTUNK;
11470                     target_siginfo_t info;
11471 
11472                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11473                         si_code = TARGET_FPE_FLTUND;
11474                     }
11475                     if (fex & SWCR_TRAP_ENABLE_INE) {
11476                         si_code = TARGET_FPE_FLTRES;
11477                     }
11478                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11479                         si_code = TARGET_FPE_FLTUND;
11480                     }
11481                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11482                         si_code = TARGET_FPE_FLTOVF;
11483                     }
11484                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11485                         si_code = TARGET_FPE_FLTDIV;
11486                     }
11487                     if (fex & SWCR_TRAP_ENABLE_INV) {
11488                         si_code = TARGET_FPE_FLTINV;
11489                     }
11490 
11491                     info.si_signo = SIGFPE;
11492                     info.si_errno = 0;
11493                     info.si_code = si_code;
11494                     info._sifields._sigfault._addr
11495                         = ((CPUArchState *)cpu_env)->pc;
11496                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11497                                  QEMU_SI_FAULT, &info);
11498                 }
11499                 ret = 0;
11500             }
11501             break;
11502 
11503           /* case SSI_NVPAIRS:
11504              -- Used with SSIN_UACPROC to enable unaligned accesses.
11505              case SSI_IEEE_STATE_AT_SIGNAL:
11506              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11507              -- Not implemented in linux kernel
11508           */
11509         }
11510         return ret;
11511 #endif
11512 #ifdef TARGET_NR_osf_sigprocmask
11513     /* Alpha specific.  */
11514     case TARGET_NR_osf_sigprocmask:
11515         {
11516             abi_ulong mask;
11517             int how;
11518             sigset_t set, oldset;
11519 
11520             switch(arg1) {
11521             case TARGET_SIG_BLOCK:
11522                 how = SIG_BLOCK;
11523                 break;
11524             case TARGET_SIG_UNBLOCK:
11525                 how = SIG_UNBLOCK;
11526                 break;
11527             case TARGET_SIG_SETMASK:
11528                 how = SIG_SETMASK;
11529                 break;
11530             default:
11531                 return -TARGET_EINVAL;
11532             }
11533             mask = arg2;
11534             target_to_host_old_sigset(&set, &mask);
11535             ret = do_sigprocmask(how, &set, &oldset);
11536             if (!ret) {
11537                 host_to_target_old_sigset(&mask, &oldset);
11538                 ret = mask;
11539             }
11540         }
11541         return ret;
11542 #endif
11543 
11544 #ifdef TARGET_NR_getgid32
11545     case TARGET_NR_getgid32:
11546         return get_errno(getgid());
11547 #endif
11548 #ifdef TARGET_NR_geteuid32
11549     case TARGET_NR_geteuid32:
11550         return get_errno(geteuid());
11551 #endif
11552 #ifdef TARGET_NR_getegid32
11553     case TARGET_NR_getegid32:
11554         return get_errno(getegid());
11555 #endif
11556 #ifdef TARGET_NR_setreuid32
11557     case TARGET_NR_setreuid32:
11558         return get_errno(setreuid(arg1, arg2));
11559 #endif
11560 #ifdef TARGET_NR_setregid32
11561     case TARGET_NR_setregid32:
11562         return get_errno(setregid(arg1, arg2));
11563 #endif
11564 #ifdef TARGET_NR_getgroups32
11565     case TARGET_NR_getgroups32:
11566         {
11567             int gidsetsize = arg1;
11568             uint32_t *target_grouplist;
11569             gid_t *grouplist;
11570             int i;
11571 
11572             grouplist = alloca(gidsetsize * sizeof(gid_t));
11573             ret = get_errno(getgroups(gidsetsize, grouplist));
11574             if (gidsetsize == 0)
11575                 return ret;
11576             if (!is_error(ret)) {
11577                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11578                 if (!target_grouplist) {
11579                     return -TARGET_EFAULT;
11580                 }
11581                 for (i = 0; i < ret; i++)
11582                     target_grouplist[i] = tswap32(grouplist[i]);
11583                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11584             }
11585         }
11586         return ret;
11587 #endif
11588 #ifdef TARGET_NR_setgroups32
11589     case TARGET_NR_setgroups32:
11590         {
11591             int gidsetsize = arg1;
11592             uint32_t *target_grouplist;
11593             gid_t *grouplist;
11594             int i;
11595 
11596             grouplist = alloca(gidsetsize * sizeof(gid_t));
11597             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11598             if (!target_grouplist) {
11599                 return -TARGET_EFAULT;
11600             }
11601             for (i = 0; i < gidsetsize; i++)
11602                 grouplist[i] = tswap32(target_grouplist[i]);
11603             unlock_user(target_grouplist, arg2, 0);
11604             return get_errno(setgroups(gidsetsize, grouplist));
11605         }
11606 #endif
11607 #ifdef TARGET_NR_fchown32
11608     case TARGET_NR_fchown32:
11609         return get_errno(fchown(arg1, arg2, arg3));
11610 #endif
11611 #ifdef TARGET_NR_setresuid32
11612     case TARGET_NR_setresuid32:
11613         return get_errno(sys_setresuid(arg1, arg2, arg3));
11614 #endif
11615 #ifdef TARGET_NR_getresuid32
11616     case TARGET_NR_getresuid32:
11617         {
11618             uid_t ruid, euid, suid;
11619             ret = get_errno(getresuid(&ruid, &euid, &suid));
11620             if (!is_error(ret)) {
11621                 if (put_user_u32(ruid, arg1)
11622                     || put_user_u32(euid, arg2)
11623                     || put_user_u32(suid, arg3))
11624                     return -TARGET_EFAULT;
11625             }
11626         }
11627         return ret;
11628 #endif
11629 #ifdef TARGET_NR_setresgid32
11630     case TARGET_NR_setresgid32:
11631         return get_errno(sys_setresgid(arg1, arg2, arg3));
11632 #endif
11633 #ifdef TARGET_NR_getresgid32
11634     case TARGET_NR_getresgid32:
11635         {
11636             gid_t rgid, egid, sgid;
11637             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11638             if (!is_error(ret)) {
11639                 if (put_user_u32(rgid, arg1)
11640                     || put_user_u32(egid, arg2)
11641                     || put_user_u32(sgid, arg3))
11642                     return -TARGET_EFAULT;
11643             }
11644         }
11645         return ret;
11646 #endif
11647 #ifdef TARGET_NR_chown32
11648     case TARGET_NR_chown32:
11649         if (!(p = lock_user_string(arg1)))
11650             return -TARGET_EFAULT;
11651         ret = get_errno(chown(p, arg2, arg3));
11652         unlock_user(p, arg1, 0);
11653         return ret;
11654 #endif
11655 #ifdef TARGET_NR_setuid32
11656     case TARGET_NR_setuid32:
11657         return get_errno(sys_setuid(arg1));
11658 #endif
11659 #ifdef TARGET_NR_setgid32
11660     case TARGET_NR_setgid32:
11661         return get_errno(sys_setgid(arg1));
11662 #endif
11663 #ifdef TARGET_NR_setfsuid32
11664     case TARGET_NR_setfsuid32:
11665         return get_errno(setfsuid(arg1));
11666 #endif
11667 #ifdef TARGET_NR_setfsgid32
11668     case TARGET_NR_setfsgid32:
11669         return get_errno(setfsgid(arg1));
11670 #endif
11671 #ifdef TARGET_NR_mincore
11672     case TARGET_NR_mincore:
11673         {
11674             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11675             if (!a) {
11676                 return -TARGET_ENOMEM;
11677             }
11678             p = lock_user_string(arg3);
11679             if (!p) {
11680                 ret = -TARGET_EFAULT;
11681             } else {
11682                 ret = get_errno(mincore(a, arg2, p));
11683                 unlock_user(p, arg3, ret);
11684             }
11685             unlock_user(a, arg1, 0);
11686         }
11687         return ret;
11688 #endif
11689 #ifdef TARGET_NR_arm_fadvise64_64
11690     case TARGET_NR_arm_fadvise64_64:
11691         /* arm_fadvise64_64 looks like fadvise64_64 but
11692          * with different argument order: fd, advice, offset, len
11693          * rather than the usual fd, offset, len, advice.
11694          * Note that offset and len are both 64-bit so appear as
11695          * pairs of 32-bit registers.
11696          */
11697         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11698                             target_offset64(arg5, arg6), arg2);
11699         return -host_to_target_errno(ret);
11700 #endif
11701 
11702 #if TARGET_ABI_BITS == 32
11703 
11704 #ifdef TARGET_NR_fadvise64_64
11705     case TARGET_NR_fadvise64_64:
11706 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11707         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11708         ret = arg2;
11709         arg2 = arg3;
11710         arg3 = arg4;
11711         arg4 = arg5;
11712         arg5 = arg6;
11713         arg6 = ret;
11714 #else
11715         /* 6 args: fd, offset (high, low), len (high, low), advice */
11716         if (regpairs_aligned(cpu_env, num)) {
11717             /* offset is in (3,4), len in (5,6) and advice in 7 */
11718             arg2 = arg3;
11719             arg3 = arg4;
11720             arg4 = arg5;
11721             arg5 = arg6;
11722             arg6 = arg7;
11723         }
11724 #endif
11725         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11726                             target_offset64(arg4, arg5), arg6);
11727         return -host_to_target_errno(ret);
11728 #endif
11729 
11730 #ifdef TARGET_NR_fadvise64
11731     case TARGET_NR_fadvise64:
11732         /* 5 args: fd, offset (high, low), len, advice */
11733         if (regpairs_aligned(cpu_env, num)) {
11734             /* offset is in (3,4), len in 5 and advice in 6 */
11735             arg2 = arg3;
11736             arg3 = arg4;
11737             arg4 = arg5;
11738             arg5 = arg6;
11739         }
11740         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11741         return -host_to_target_errno(ret);
11742 #endif
11743 
11744 #else /* not a 32-bit ABI */
11745 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11746 #ifdef TARGET_NR_fadvise64_64
11747     case TARGET_NR_fadvise64_64:
11748 #endif
11749 #ifdef TARGET_NR_fadvise64
11750     case TARGET_NR_fadvise64:
11751 #endif
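              /*
               * Presumably the s390 target ABI encodes POSIX_FADV_DONTNEED
               * and POSIX_FADV_NOREUSE as 6 and 7 (rather than the host's
               * 4 and 5), which is what the remapping below assumes.
               */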
11752 #ifdef TARGET_S390X
11753         switch (arg4) {
11754         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11755         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11756         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11757         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11758         default: break;
11759         }
11760 #endif
11761         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11762 #endif
11763 #endif /* end of 64-bit ABI fadvise handling */
11764 
11765 #ifdef TARGET_NR_madvise
11766     case TARGET_NR_madvise:
11767         /* A straight passthrough may not be safe because qemu sometimes
11768            turns private file-backed mappings into anonymous mappings.
11769            This will break MADV_DONTNEED.
11770            This is a hint, so ignoring and returning success is ok.  */
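              /* (MADV_DONTNEED on an anonymous mapping discards the pages
                 outright instead of repopulating them from the file, which
                 is why the remapping mentioned above would change
                 guest-visible behaviour.) */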
11771         return 0;
11772 #endif
11773 #ifdef TARGET_NR_fcntl64
11774     case TARGET_NR_fcntl64:
11775     {
11776         int cmd;
11777         struct flock64 fl;
11778         from_flock64_fn *copyfrom = copy_from_user_flock64;
11779         to_flock64_fn *copyto = copy_to_user_flock64;
11780 
11781 #ifdef TARGET_ARM
11782         if (!((CPUARMState *)cpu_env)->eabi) {
11783             copyfrom = copy_from_user_oabi_flock64;
11784             copyto = copy_to_user_oabi_flock64;
11785         }
11786 #endif
11787 
11788         cmd = target_to_host_fcntl_cmd(arg2);
11789         if (cmd == -TARGET_EINVAL) {
11790             return cmd;
11791         }
11792 
11793         switch(arg2) {
11794         case TARGET_F_GETLK64:
11795             ret = copyfrom(&fl, arg3);
11796             if (ret) {
11797                 break;
11798             }
11799             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11800             if (ret == 0) {
11801                 ret = copyto(arg3, &fl);
11802             }
11803             break;
11804 
11805         case TARGET_F_SETLK64:
11806         case TARGET_F_SETLKW64:
11807             ret = copyfrom(&fl, arg3);
11808             if (ret) {
11809                 break;
11810             }
11811             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11812             break;
11813         default:
11814             ret = do_fcntl(arg1, arg2, arg3);
11815             break;
11816         }
11817         return ret;
11818     }
11819 #endif
11820 #ifdef TARGET_NR_cacheflush
11821     case TARGET_NR_cacheflush:
11822         /* self-modifying code is handled automatically, so nothing needed */
11823         return 0;
11824 #endif
11825 #ifdef TARGET_NR_getpagesize
11826     case TARGET_NR_getpagesize:
11827         return TARGET_PAGE_SIZE;
11828 #endif
11829     case TARGET_NR_gettid:
11830         return get_errno(sys_gettid());
11831 #ifdef TARGET_NR_readahead
11832     case TARGET_NR_readahead:
11833 #if TARGET_ABI_BITS == 32
11834         if (regpairs_aligned(cpu_env, num)) {
11835             arg2 = arg3;
11836             arg3 = arg4;
11837             arg4 = arg5;
11838         }
11839         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11840 #else
11841         ret = get_errno(readahead(arg1, arg2, arg3));
11842 #endif
11843         return ret;
11844 #endif
11845 #ifdef CONFIG_ATTR
11846 #ifdef TARGET_NR_setxattr
11847     case TARGET_NR_listxattr:
11848     case TARGET_NR_llistxattr:
11849     {
11850         void *p, *b = 0;
11851         if (arg2) {
11852             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11853             if (!b) {
11854                 return -TARGET_EFAULT;
11855             }
11856         }
11857         p = lock_user_string(arg1);
11858         if (p) {
11859             if (num == TARGET_NR_listxattr) {
11860                 ret = get_errno(listxattr(p, b, arg3));
11861             } else {
11862                 ret = get_errno(llistxattr(p, b, arg3));
11863             }
11864         } else {
11865             ret = -TARGET_EFAULT;
11866         }
11867         unlock_user(p, arg1, 0);
11868         unlock_user(b, arg2, arg3);
11869         return ret;
11870     }
11871     case TARGET_NR_flistxattr:
11872     {
11873         void *b = 0;
11874         if (arg2) {
11875             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11876             if (!b) {
11877                 return -TARGET_EFAULT;
11878             }
11879         }
11880         ret = get_errno(flistxattr(arg1, b, arg3));
11881         unlock_user(b, arg2, arg3);
11882         return ret;
11883     }
11884     case TARGET_NR_setxattr:
11885     case TARGET_NR_lsetxattr:
11886         {
11887             void *p, *n, *v = 0;
11888             if (arg3) {
11889                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11890                 if (!v) {
11891                     return -TARGET_EFAULT;
11892                 }
11893             }
11894             p = lock_user_string(arg1);
11895             n = lock_user_string(arg2);
11896             if (p && n) {
11897                 if (num == TARGET_NR_setxattr) {
11898                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11899                 } else {
11900                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11901                 }
11902             } else {
11903                 ret = -TARGET_EFAULT;
11904             }
11905             unlock_user(p, arg1, 0);
11906             unlock_user(n, arg2, 0);
11907             unlock_user(v, arg3, 0);
11908         }
11909         return ret;
11910     case TARGET_NR_fsetxattr:
11911         {
11912             void *n, *v = 0;
11913             if (arg3) {
11914                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11915                 if (!v) {
11916                     return -TARGET_EFAULT;
11917                 }
11918             }
11919             n = lock_user_string(arg2);
11920             if (n) {
11921                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11922             } else {
11923                 ret = -TARGET_EFAULT;
11924             }
11925             unlock_user(n, arg2, 0);
11926             unlock_user(v, arg3, 0);
11927         }
11928         return ret;
11929     case TARGET_NR_getxattr:
11930     case TARGET_NR_lgetxattr:
11931         {
11932             void *p, *n, *v = 0;
11933             if (arg3) {
11934                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11935                 if (!v) {
11936                     return -TARGET_EFAULT;
11937                 }
11938             }
11939             p = lock_user_string(arg1);
11940             n = lock_user_string(arg2);
11941             if (p && n) {
11942                 if (num == TARGET_NR_getxattr) {
11943                     ret = get_errno(getxattr(p, n, v, arg4));
11944                 } else {
11945                     ret = get_errno(lgetxattr(p, n, v, arg4));
11946                 }
11947             } else {
11948                 ret = -TARGET_EFAULT;
11949             }
11950             unlock_user(p, arg1, 0);
11951             unlock_user(n, arg2, 0);
11952             unlock_user(v, arg3, arg4);
11953         }
11954         return ret;
11955     case TARGET_NR_fgetxattr:
11956         {
11957             void *n, *v = 0;
11958             if (arg3) {
11959                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11960                 if (!v) {
11961                     return -TARGET_EFAULT;
11962                 }
11963             }
11964             n = lock_user_string(arg2);
11965             if (n) {
11966                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11967             } else {
11968                 ret = -TARGET_EFAULT;
11969             }
11970             unlock_user(n, arg2, 0);
11971             unlock_user(v, arg3, arg4);
11972         }
11973         return ret;
11974     case TARGET_NR_removexattr:
11975     case TARGET_NR_lremovexattr:
11976         {
11977             void *p, *n;
11978             p = lock_user_string(arg1);
11979             n = lock_user_string(arg2);
11980             if (p && n) {
11981                 if (num == TARGET_NR_removexattr) {
11982                     ret = get_errno(removexattr(p, n));
11983                 } else {
11984                     ret = get_errno(lremovexattr(p, n));
11985                 }
11986             } else {
11987                 ret = -TARGET_EFAULT;
11988             }
11989             unlock_user(p, arg1, 0);
11990             unlock_user(n, arg2, 0);
11991         }
11992         return ret;
11993     case TARGET_NR_fremovexattr:
11994         {
11995             void *n;
11996             n = lock_user_string(arg2);
11997             if (n) {
11998                 ret = get_errno(fremovexattr(arg1, n));
11999             } else {
12000                 ret = -TARGET_EFAULT;
12001             }
12002             unlock_user(n, arg2, 0);
12003         }
12004         return ret;
12005 #endif
12006 #endif /* CONFIG_ATTR */
12007 #ifdef TARGET_NR_set_thread_area
12008     case TARGET_NR_set_thread_area:
12009 #if defined(TARGET_MIPS)
12010       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12011       return 0;
12012 #elif defined(TARGET_CRIS)
12013       if (arg1 & 0xff) {
12014           ret = -TARGET_EINVAL;
12015       } else {
12016           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12017           ret = 0;
12018       }
12019       return ret;
12020 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12021       return do_set_thread_area(cpu_env, arg1);
12022 #elif defined(TARGET_M68K)
12023       {
12024           TaskState *ts = cpu->opaque;
12025           ts->tp_value = arg1;
12026           return 0;
12027       }
12028 #else
12029       return -TARGET_ENOSYS;
12030 #endif
12031 #endif
12032 #ifdef TARGET_NR_get_thread_area
12033     case TARGET_NR_get_thread_area:
12034 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12035         return do_get_thread_area(cpu_env, arg1);
12036 #elif defined(TARGET_M68K)
12037         {
12038             TaskState *ts = cpu->opaque;
12039             return ts->tp_value;
12040         }
12041 #else
12042         return -TARGET_ENOSYS;
12043 #endif
12044 #endif
12045 #ifdef TARGET_NR_getdomainname
12046     case TARGET_NR_getdomainname:
12047         return -TARGET_ENOSYS;
12048 #endif
12049 
12050 #ifdef TARGET_NR_clock_settime
12051     case TARGET_NR_clock_settime:
12052     {
12053         struct timespec ts;
12054 
12055         ret = target_to_host_timespec(&ts, arg2);
12056         if (!is_error(ret)) {
12057             ret = get_errno(clock_settime(arg1, &ts));
12058         }
12059         return ret;
12060     }
12061 #endif
12062 #ifdef TARGET_NR_clock_settime64
12063     case TARGET_NR_clock_settime64:
12064     {
12065         struct timespec ts;
12066 
12067         ret = target_to_host_timespec64(&ts, arg2);
12068         if (!is_error(ret)) {
12069             ret = get_errno(clock_settime(arg1, &ts));
12070         }
12071         return ret;
12072     }
12073 #endif
12074 #ifdef TARGET_NR_clock_gettime
12075     case TARGET_NR_clock_gettime:
12076     {
12077         struct timespec ts;
12078         ret = get_errno(clock_gettime(arg1, &ts));
12079         if (!is_error(ret)) {
12080             ret = host_to_target_timespec(arg2, &ts);
12081         }
12082         return ret;
12083     }
12084 #endif
12085 #ifdef TARGET_NR_clock_gettime64
12086     case TARGET_NR_clock_gettime64:
12087     {
12088         struct timespec ts;
12089         ret = get_errno(clock_gettime(arg1, &ts));
12090         if (!is_error(ret)) {
12091             ret = host_to_target_timespec64(arg2, &ts);
12092         }
12093         return ret;
12094     }
12095 #endif
12096 #ifdef TARGET_NR_clock_getres
12097     case TARGET_NR_clock_getres:
12098     {
12099         struct timespec ts;
12100         ret = get_errno(clock_getres(arg1, &ts));
12101         if (!is_error(ret) && arg2) {
12102             if (host_to_target_timespec(arg2, &ts)) {
                      return -TARGET_EFAULT;
                  }
12103         }
12104         return ret;
12105     }
12106 #endif
12107 #ifdef TARGET_NR_clock_getres_time64
12108     case TARGET_NR_clock_getres_time64:
12109     {
12110         struct timespec ts;
12111         ret = get_errno(clock_getres(arg1, &ts));
12112         if (!is_error(ret) && arg2) {
12113             if (host_to_target_timespec64(arg2, &ts)) {
                      return -TARGET_EFAULT;
                  }
12114         }
12115         return ret;
12116     }
12117 #endif
12118 #ifdef TARGET_NR_clock_nanosleep
12119     case TARGET_NR_clock_nanosleep:
12120     {
12121         struct timespec ts;
12122         if (target_to_host_timespec(&ts, arg3)) {
12123             return -TARGET_EFAULT;
12124         }
12125         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12126                                              &ts, arg4 ? &ts : NULL));
12127         /*
12128          * If the call is interrupted by a signal handler, it fails with
12129          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12130          * the remaining unslept time is written back to arg4.
12131          */
12132         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12133             host_to_target_timespec(arg4, &ts)) {
12134               return -TARGET_EFAULT;
12135         }
12136 
12137         return ret;
12138     }
12139 #endif
12140 #ifdef TARGET_NR_clock_nanosleep_time64
12141     case TARGET_NR_clock_nanosleep_time64:
12142     {
12143         struct timespec ts;
12144 
12145         if (target_to_host_timespec64(&ts, arg3)) {
12146             return -TARGET_EFAULT;
12147         }
12148 
12149         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12150                                              &ts, arg4 ? &ts : NULL));
12151 
12152         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12153             host_to_target_timespec64(arg4, &ts)) {
12154             return -TARGET_EFAULT;
12155         }
12156         return ret;
12157     }
12158 #endif
12159 
12160 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
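          /*
           * The kernel remembers this pointer and clears the word it points to
           * (with a futex wake) when the thread exits, so hand it the host
           * view of the guest address obtained via g2h().
           */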
12161     case TARGET_NR_set_tid_address:
12162         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12163 #endif
12164 
12165     case TARGET_NR_tkill:
12166         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12167 
12168     case TARGET_NR_tgkill:
12169         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12170                          target_to_host_signal(arg3)));
12171 
12172 #ifdef TARGET_NR_set_robust_list
12173     case TARGET_NR_set_robust_list:
12174     case TARGET_NR_get_robust_list:
12175         /* The ABI for supporting robust futexes has userspace pass
12176          * the kernel a pointer to a linked list which is updated by
12177          * userspace after the syscall; the list is walked by the kernel
12178          * when the thread exits. Since the linked list in QEMU guest
12179          * memory isn't a valid linked list for the host and we have
12180          * no way to reliably intercept the thread-death event, we can't
12181          * support these. Silently return ENOSYS so that guest userspace
12182          * falls back to a non-robust futex implementation (which should
12183          * be OK except in the corner case of the guest crashing while
12184          * holding a mutex that is shared with another process via
12185          * shared memory).
12186          */
12187         return -TARGET_ENOSYS;
12188 #endif
12189 
12190 #if defined(TARGET_NR_utimensat)
12191     case TARGET_NR_utimensat:
12192         {
12193             struct timespec *tsp, ts[2];
12194             if (!arg3) {
12195                 tsp = NULL;
12196             } else {
12197                 if (target_to_host_timespec(ts, arg3)) {
12198                     return -TARGET_EFAULT;
12199                 }
12200                 if (target_to_host_timespec(ts + 1, arg3 +
12201                                             sizeof(struct target_timespec))) {
12202                     return -TARGET_EFAULT;
12203                 }
12204                 tsp = ts;
12205             }
12206             if (!arg2) {
12207                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12208             } else {
12209                 if (!(p = lock_user_string(arg2))) {
12210                     return -TARGET_EFAULT;
12211                 }
12212                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12213                 unlock_user(p, arg2, 0);
12214             }
12215         }
12216         return ret;
12217 #endif
12218 #ifdef TARGET_NR_utimensat_time64
12219     case TARGET_NR_utimensat_time64:
12220         {
12221             struct timespec *tsp, ts[2];
12222             if (!arg3) {
12223                 tsp = NULL;
12224             } else {
12225                 if (target_to_host_timespec64(ts, arg3)) {
12226                     return -TARGET_EFAULT;
12227                 }
12228                 if (target_to_host_timespec64(ts + 1, arg3 +
12229                                      sizeof(struct target__kernel_timespec))) {
12230                     return -TARGET_EFAULT;
12231                 }
12232                 tsp = ts;
12233             }
12234             if (!arg2) {
12235                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12236             } else {
12237                 p = lock_user_string(arg2);
12238                 if (!p) {
12239                     return -TARGET_EFAULT;
12240                 }
12241                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12242                 unlock_user(p, arg2, 0);
12243             }
12244         }
12245         return ret;
12246 #endif
12247 #ifdef TARGET_NR_futex
12248     case TARGET_NR_futex:
12249         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12250 #endif
12251 #ifdef TARGET_NR_futex_time64
12252     case TARGET_NR_futex_time64:
12253         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12254 #endif
12255 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12256     case TARGET_NR_inotify_init:
12257         ret = get_errno(sys_inotify_init());
12258         if (ret >= 0) {
12259             fd_trans_register(ret, &target_inotify_trans);
12260         }
12261         return ret;
12262 #endif
12263 #ifdef CONFIG_INOTIFY1
12264 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12265     case TARGET_NR_inotify_init1:
12266         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12267                                           fcntl_flags_tbl)));
12268         if (ret >= 0) {
12269             fd_trans_register(ret, &target_inotify_trans);
12270         }
12271         return ret;
12272 #endif
12273 #endif
12274 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12275     case TARGET_NR_inotify_add_watch:
12276         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12277         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12278         unlock_user(p, arg2, 0);
12279         return ret;
12280 #endif
12281 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12282     case TARGET_NR_inotify_rm_watch:
12283         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12284 #endif
12285 
12286 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12287     case TARGET_NR_mq_open:
12288         {
12289             struct mq_attr posix_mq_attr;
12290             struct mq_attr *pposix_mq_attr;
12291             int host_flags;
12292 
12293             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12294             pposix_mq_attr = NULL;
12295             if (arg4) {
12296                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12297                     return -TARGET_EFAULT;
12298                 }
12299                 pposix_mq_attr = &posix_mq_attr;
12300             }
12301             p = lock_user_string(arg1 - 1);
12302             if (!p) {
12303                 return -TARGET_EFAULT;
12304             }
12305             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12306             unlock_user(p, arg1, 0);
12307         }
12308         return ret;
12309 
12310     case TARGET_NR_mq_unlink:
12311         p = lock_user_string(arg1 - 1);
12312         if (!p) {
12313             return -TARGET_EFAULT;
12314         }
12315         ret = get_errno(mq_unlink(p));
12316         unlock_user(p, arg1, 0);
12317         return ret;
12318 
12319 #ifdef TARGET_NR_mq_timedsend
12320     case TARGET_NR_mq_timedsend:
12321         {
12322             struct timespec ts;
12323 
12324             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12325             if (arg5 != 0) {
12326                 if (target_to_host_timespec(&ts, arg5)) {
12327                     return -TARGET_EFAULT;
12328                 }
12329                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12330                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12331                     return -TARGET_EFAULT;
12332                 }
12333             } else {
12334                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12335             }
12336             unlock_user(p, arg2, arg3);
12337         }
12338         return ret;
12339 #endif
12340 #ifdef TARGET_NR_mq_timedsend_time64
12341     case TARGET_NR_mq_timedsend_time64:
12342         {
12343             struct timespec ts;
12344 
12345             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12346             if (arg5 != 0) {
12347                 if (target_to_host_timespec64(&ts, arg5)) {
12348                     return -TARGET_EFAULT;
12349                 }
12350                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12351                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12352                     return -TARGET_EFAULT;
12353                 }
12354             } else {
12355                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12356             }
12357             unlock_user(p, arg2, arg3);
12358         }
12359         return ret;
12360 #endif
12361 
12362 #ifdef TARGET_NR_mq_timedreceive
12363     case TARGET_NR_mq_timedreceive:
12364         {
12365             struct timespec ts;
12366             unsigned int prio;
12367 
12368             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12369             if (arg5 != 0) {
12370                 if (target_to_host_timespec(&ts, arg5)) {
12371                     return -TARGET_EFAULT;
12372                 }
12373                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12374                                                      &prio, &ts));
12375                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12376                     return -TARGET_EFAULT;
12377                 }
12378             } else {
12379                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12380                                                      &prio, NULL));
12381             }
12382             unlock_user(p, arg2, arg3);
12383             if (arg4 != 0) {
12384                 put_user_u32(prio, arg4);
                  }
12385         }
12386         return ret;
12387 #endif
12388 #ifdef TARGET_NR_mq_timedreceive_time64
12389     case TARGET_NR_mq_timedreceive_time64:
12390         {
12391             struct timespec ts;
12392             unsigned int prio;
12393 
12394             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12395             if (arg5 != 0) {
12396                 if (target_to_host_timespec64(&ts, arg5)) {
12397                     return -TARGET_EFAULT;
12398                 }
12399                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12400                                                      &prio, &ts));
12401                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12402                     return -TARGET_EFAULT;
12403                 }
12404             } else {
12405                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12406                                                      &prio, NULL));
12407             }
12408             unlock_user(p, arg2, arg3);
12409             if (arg4 != 0) {
12410                 put_user_u32(prio, arg4);
12411             }
12412         }
12413         return ret;
12414 #endif
12415 
12416     /* Not implemented for now... */
12417 /*     case TARGET_NR_mq_notify: */
12418 /*         break; */
12419 
12420     case TARGET_NR_mq_getsetattr:
12421         {
12422             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12423             ret = 0;
12424             if (arg2 != 0) {
12425                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                          return -TARGET_EFAULT;
                      }
12426                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12427                                            &posix_mq_attr_out));
12428             } else if (arg3 != 0) {
12429                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12430             }
12431             if (ret == 0 && arg3 != 0) {
12432                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12433             }
12434         }
12435         return ret;
12436 #endif
12437 
12438 #ifdef CONFIG_SPLICE
12439 #ifdef TARGET_NR_tee
12440     case TARGET_NR_tee:
12441         {
12442                 ret = get_errno(tee(arg1, arg2, arg3, arg4));
12443         }
12444         return ret;
12445 #endif
12446 #ifdef TARGET_NR_splice
12447     case TARGET_NR_splice:
12448         {
12449             loff_t loff_in, loff_out;
12450             loff_t *ploff_in = NULL, *ploff_out = NULL;
12451             if (arg2) {
12452                 if (get_user_u64(loff_in, arg2)) {
12453                     return -TARGET_EFAULT;
12454                 }
12455                 ploff_in = &loff_in;
12456             }
12457             if (arg4) {
12458                 if (get_user_u64(loff_out, arg4)) {
12459                     return -TARGET_EFAULT;
12460                 }
12461                 ploff_out = &loff_out;
12462             }
12463             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12464             if (arg2) {
12465                 if (put_user_u64(loff_in, arg2)) {
12466                     return -TARGET_EFAULT;
12467                 }
12468             }
12469             if (arg4) {
12470                 if (put_user_u64(loff_out, arg4)) {
12471                     return -TARGET_EFAULT;
12472                 }
12473             }
12474         }
12475         return ret;
12476 #endif
12477 #ifdef TARGET_NR_vmsplice
12478     case TARGET_NR_vmsplice:
12479         {
12480             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12481             if (vec != NULL) {
12482                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12483                 unlock_iovec(vec, arg2, arg3, 0);
12484             } else {
12485                 ret = -host_to_target_errno(errno);
12486             }
12487         }
12488         return ret;
12489 #endif
12490 #endif /* CONFIG_SPLICE */
12491 #ifdef CONFIG_EVENTFD
12492 #if defined(TARGET_NR_eventfd)
12493     case TARGET_NR_eventfd:
12494         ret = get_errno(eventfd(arg1, 0));
12495         if (ret >= 0) {
12496             fd_trans_register(ret, &target_eventfd_trans);
12497         }
12498         return ret;
12499 #endif
12500 #if defined(TARGET_NR_eventfd2)
12501     case TARGET_NR_eventfd2:
12502     {
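              /*
               * EFD_NONBLOCK and EFD_CLOEXEC share their values with O_NONBLOCK
               * and O_CLOEXEC, which differ between targets, so only those two
               * bits need translating into host flags here.
               */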
12503         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12504         if (arg2 & TARGET_O_NONBLOCK) {
12505             host_flags |= O_NONBLOCK;
12506         }
12507         if (arg2 & TARGET_O_CLOEXEC) {
12508             host_flags |= O_CLOEXEC;
12509         }
12510         ret = get_errno(eventfd(arg1, host_flags));
12511         if (ret >= 0) {
12512             fd_trans_register(ret, &target_eventfd_trans);
12513         }
12514         return ret;
12515     }
12516 #endif
12517 #endif /* CONFIG_EVENTFD  */
12518 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12519     case TARGET_NR_fallocate:
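              /*
               * On 32-bit ABIs the 64-bit offset and length each arrive split
               * across a pair of registers; target_offset64() reassembles them
               * in the target's register order.
               */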
12520 #if TARGET_ABI_BITS == 32
12521         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12522                                   target_offset64(arg5, arg6)));
12523 #else
12524         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12525 #endif
12526         return ret;
12527 #endif
12528 #if defined(CONFIG_SYNC_FILE_RANGE)
12529 #if defined(TARGET_NR_sync_file_range)
12530     case TARGET_NR_sync_file_range:
12531 #if TARGET_ABI_BITS == 32
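              /*
               * The two 64-bit offsets are split across register pairs on
               * 32-bit ABIs.  MIPS o32 additionally aligns 64-bit arguments to
               * even register pairs, inserting a padding slot after the fd and
               * shifting the remaining arguments up by one.
               */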
12532 #if defined(TARGET_MIPS)
12533         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12534                                         target_offset64(arg5, arg6), arg7));
12535 #else
12536         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12537                                         target_offset64(arg4, arg5), arg6));
12538 #endif /* !TARGET_MIPS */
12539 #else
12540         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12541 #endif
12542         return ret;
12543 #endif
12544 #if defined(TARGET_NR_sync_file_range2) || \
12545     defined(TARGET_NR_arm_sync_file_range)
12546 #if defined(TARGET_NR_sync_file_range2)
12547     case TARGET_NR_sync_file_range2:
12548 #endif
12549 #if defined(TARGET_NR_arm_sync_file_range)
12550     case TARGET_NR_arm_sync_file_range:
12551 #endif
12552         /* This is like sync_file_range but the arguments are reordered */
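              /*
               * (The flags argument was moved up to arg2 so that each 64-bit
               * offset can be passed in an aligned register pair on 32-bit
               * ABIs.)
               */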
12553 #if TARGET_ABI_BITS == 32
12554         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12555                                         target_offset64(arg5, arg6), arg2));
12556 #else
12557         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12558 #endif
12559         return ret;
12560 #endif
12561 #endif
12562 #if defined(TARGET_NR_signalfd4)
12563     case TARGET_NR_signalfd4:
12564         return do_signalfd4(arg1, arg2, arg4);
12565 #endif
12566 #if defined(TARGET_NR_signalfd)
12567     case TARGET_NR_signalfd:
12568         return do_signalfd4(arg1, arg2, 0);
12569 #endif
12570 #if defined(CONFIG_EPOLL)
12571 #if defined(TARGET_NR_epoll_create)
12572     case TARGET_NR_epoll_create:
12573         return get_errno(epoll_create(arg1));
12574 #endif
12575 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12576     case TARGET_NR_epoll_create1:
12577         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12578 #endif
12579 #if defined(TARGET_NR_epoll_ctl)
12580     case TARGET_NR_epoll_ctl:
12581     {
12582         struct epoll_event ep;
12583         struct epoll_event *epp = 0;
12584         if (arg4) {
12585             if (arg2 != EPOLL_CTL_DEL) {
12586                 struct target_epoll_event *target_ep;
12587                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12588                     return -TARGET_EFAULT;
12589                 }
12590                 ep.events = tswap32(target_ep->events);
12591                 /*
12592                  * The epoll_data_t union is just opaque data to the kernel,
12593                  * so we transfer all 64 bits across and need not worry what
12594                  * actual data type it is.
12595                  */
12596                 ep.data.u64 = tswap64(target_ep->data.u64);
12597                 unlock_user_struct(target_ep, arg4, 0);
12598             }
12599             /*
12600              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12601              * non-null pointer, even though this argument is ignored.
12602              */
12604             epp = &ep;
12605         }
12606         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12607     }
12608 #endif
12609 
12610 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12611 #if defined(TARGET_NR_epoll_wait)
12612     case TARGET_NR_epoll_wait:
12613 #endif
12614 #if defined(TARGET_NR_epoll_pwait)
12615     case TARGET_NR_epoll_pwait:
12616 #endif
12617     {
12618         struct target_epoll_event *target_ep;
12619         struct epoll_event *ep;
12620         int epfd = arg1;
12621         int maxevents = arg3;
12622         int timeout = arg4;
12623 
12624         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
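              /*
               * Bound maxevents the same way the kernel does, and so that the
               * bounce buffer allocated below stays a reasonable size.
               */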
12625             return -TARGET_EINVAL;
12626         }
12627 
12628         target_ep = lock_user(VERIFY_WRITE, arg2,
12629                               maxevents * sizeof(struct target_epoll_event), 1);
12630         if (!target_ep) {
12631             return -TARGET_EFAULT;
12632         }
12633 
12634         ep = g_try_new(struct epoll_event, maxevents);
12635         if (!ep) {
12636             unlock_user(target_ep, arg2, 0);
12637             return -TARGET_ENOMEM;
12638         }
12639 
12640         switch (num) {
12641 #if defined(TARGET_NR_epoll_pwait)
12642         case TARGET_NR_epoll_pwait:
12643         {
12644             target_sigset_t *target_set;
12645             sigset_t _set, *set = &_set;
12646 
12647             if (arg5) {
12648                 if (arg6 != sizeof(target_sigset_t)) {
12649                     ret = -TARGET_EINVAL;
12650                     break;
12651                 }
12652 
12653                 target_set = lock_user(VERIFY_READ, arg5,
12654                                        sizeof(target_sigset_t), 1);
12655                 if (!target_set) {
12656                     ret = -TARGET_EFAULT;
12657                     break;
12658                 }
12659                 target_to_host_sigset(set, target_set);
12660                 unlock_user(target_set, arg5, 0);
12661             } else {
12662                 set = NULL;
12663             }
12664 
12665             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12666                                              set, SIGSET_T_SIZE));
12667             break;
12668         }
12669 #endif
12670 #if defined(TARGET_NR_epoll_wait)
12671         case TARGET_NR_epoll_wait:
12672             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12673                                              NULL, 0));
12674             break;
12675 #endif
12676         default:
12677             ret = -TARGET_ENOSYS;
12678         }
12679         if (!is_error(ret)) {
12680             int i;
12681             for (i = 0; i < ret; i++) {
12682                 target_ep[i].events = tswap32(ep[i].events);
12683                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12684             }
12685             unlock_user(target_ep, arg2,
12686                         ret * sizeof(struct target_epoll_event));
12687         } else {
12688             unlock_user(target_ep, arg2, 0);
12689         }
12690         g_free(ep);
12691         return ret;
12692     }
12693 #endif
12694 #endif
12695 #ifdef TARGET_NR_prlimit64
12696     case TARGET_NR_prlimit64:
12697     {
12698         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12699         struct target_rlimit64 *target_rnew, *target_rold;
12700         struct host_rlimit64 rnew, rold, *rnewp = 0;
12701         int resource = target_to_host_resource(arg2);
12702 
12703         if (arg3 && (resource != RLIMIT_AS &&
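              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * passed on to the host; for those resources only the old values
               * are queried, since forwarding the new limits would restrict the
               * QEMU process itself rather than just the guest.
               */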
12704                      resource != RLIMIT_DATA &&
12705                      resource != RLIMIT_STACK)) {
12706             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12707                 return -TARGET_EFAULT;
12708             }
12709             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12710             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12711             unlock_user_struct(target_rnew, arg3, 0);
12712             rnewp = &rnew;
12713         }
12714 
12715         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12716         if (!is_error(ret) && arg4) {
12717             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12718                 return -TARGET_EFAULT;
12719             }
12720             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12721             target_rold->rlim_max = tswap64(rold.rlim_max);
12722             unlock_user_struct(target_rold, arg4, 1);
12723         }
12724         return ret;
12725     }
12726 #endif
12727 #ifdef TARGET_NR_gethostname
12728     case TARGET_NR_gethostname:
12729     {
12730         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12731         if (name) {
12732             ret = get_errno(gethostname(name, arg2));
12733             unlock_user(name, arg1, arg2);
12734         } else {
12735             ret = -TARGET_EFAULT;
12736         }
12737         return ret;
12738     }
12739 #endif
12740 #ifdef TARGET_NR_atomic_cmpxchg_32
12741     case TARGET_NR_atomic_cmpxchg_32:
12742     {
12743         /* should use start_exclusive from main.c */
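              /*
               * Note that this read/compare/write sequence is not atomic with
               * respect to other guest threads; a faithful implementation would
               * need to wrap it in start_exclusive()/end_exclusive() as the
               * comment above suggests.
               */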
12744         abi_ulong mem_value;
12745         if (get_user_u32(mem_value, arg6)) {
12746             target_siginfo_t info;
12747             info.si_signo = SIGSEGV;
12748             info.si_errno = 0;
12749             info.si_code = TARGET_SEGV_MAPERR;
12750             info._sifields._sigfault._addr = arg6;
12751             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12752                          QEMU_SI_FAULT, &info);
12753             ret = 0xdeadbeef;
12754             return ret;
12755         }
12756         if (mem_value == arg2) {
12757             put_user_u32(arg1, arg6);
              }
12758         return mem_value;
12759     }
12760 #endif
12761 #ifdef TARGET_NR_atomic_barrier
12762     case TARGET_NR_atomic_barrier:
12763         /* Like the kernel implementation and the qemu arm barrier,
12764            treat this as a no-op. */
12765         return 0;
12766 #endif
12767 
12768 #ifdef TARGET_NR_timer_create
12769     case TARGET_NR_timer_create:
12770     {
12771         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12772 
12773         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12774 
12775         int clkid = arg1;
12776         int timer_index = next_free_host_timer();
12777 
12778         if (timer_index < 0) {
12779             ret = -TARGET_EAGAIN;
12780         } else {
12781             timer_t *phtimer = g_posix_timers + timer_index;
12782 
12783             if (arg2) {
12784                 phost_sevp = &host_sevp;
12785                 ret = target_to_host_sigevent(phost_sevp, arg2);
12786                 if (ret != 0) {
12787                     return ret;
12788                 }
12789             }
12790 
12791             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12792             if (ret) {
12793                 phtimer = NULL;
12794             } else {
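                      /*
                       * The id handed back to the guest encodes TIMER_MAGIC
                       * plus the index into g_posix_timers; get_timer_id()
                       * below checks the magic and recovers the index.
                       */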
12795                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12796                     return -TARGET_EFAULT;
12797                 }
12798             }
12799         }
12800         return ret;
12801     }
12802 #endif
12803 
12804 #ifdef TARGET_NR_timer_settime
12805     case TARGET_NR_timer_settime:
12806     {
12807         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12808          * struct itimerspec * old_value */
12809         target_timer_t timerid = get_timer_id(arg1);
12810 
12811         if (timerid < 0) {
12812             ret = timerid;
12813         } else if (arg3 == 0) {
12814             ret = -TARGET_EINVAL;
12815         } else {
12816             timer_t htimer = g_posix_timers[timerid];
12817             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12818 
12819             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12820                 return -TARGET_EFAULT;
12821             }
12822             ret = get_errno(
12823                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12824             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12825                 return -TARGET_EFAULT;
12826             }
12827         }
12828         return ret;
12829     }
12830 #endif
12831 
12832 #ifdef TARGET_NR_timer_settime64
12833     case TARGET_NR_timer_settime64:
12834     {
12835         target_timer_t timerid = get_timer_id(arg1);
12836 
12837         if (timerid < 0) {
12838             ret = timerid;
12839         } else if (arg3 == 0) {
12840             ret = -TARGET_EINVAL;
12841         } else {
12842             timer_t htimer = g_posix_timers[timerid];
12843             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12844 
12845             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12846                 return -TARGET_EFAULT;
12847             }
12848             ret = get_errno(
12849                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12850             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12851                 return -TARGET_EFAULT;
12852             }
12853         }
12854         return ret;
12855     }
12856 #endif
12857 
12858 #ifdef TARGET_NR_timer_gettime
12859     case TARGET_NR_timer_gettime:
12860     {
12861         /* args: timer_t timerid, struct itimerspec *curr_value */
12862         target_timer_t timerid = get_timer_id(arg1);
12863 
12864         if (timerid < 0) {
12865             ret = timerid;
12866         } else if (!arg2) {
12867             ret = -TARGET_EFAULT;
12868         } else {
12869             timer_t htimer = g_posix_timers[timerid];
12870             struct itimerspec hspec;
12871             ret = get_errno(timer_gettime(htimer, &hspec));
12872 
12873             if (host_to_target_itimerspec(arg2, &hspec)) {
12874                 ret = -TARGET_EFAULT;
12875             }
12876         }
12877         return ret;
12878     }
12879 #endif
12880 
12881 #ifdef TARGET_NR_timer_gettime64
12882     case TARGET_NR_timer_gettime64:
12883     {
12884         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12885         target_timer_t timerid = get_timer_id(arg1);
12886 
12887         if (timerid < 0) {
12888             ret = timerid;
12889         } else if (!arg2) {
12890             ret = -TARGET_EFAULT;
12891         } else {
12892             timer_t htimer = g_posix_timers[timerid];
12893             struct itimerspec hspec;
12894             ret = get_errno(timer_gettime(htimer, &hspec));
12895 
12896             if (host_to_target_itimerspec64(arg2, &hspec)) {
12897                 ret = -TARGET_EFAULT;
12898             }
12899         }
12900         return ret;
12901     }
12902 #endif
12903 
12904 #ifdef TARGET_NR_timer_getoverrun
12905     case TARGET_NR_timer_getoverrun:
12906     {
12907         /* args: timer_t timerid */
12908         target_timer_t timerid = get_timer_id(arg1);
12909 
12910         if (timerid < 0) {
12911             ret = timerid;
12912         } else {
12913             timer_t htimer = g_posix_timers[timerid];
12914             ret = get_errno(timer_getoverrun(htimer));
12915         }
12916         return ret;
12917     }
12918 #endif
12919 
12920 #ifdef TARGET_NR_timer_delete
12921     case TARGET_NR_timer_delete:
12922     {
12923         /* args: timer_t timerid */
12924         target_timer_t timerid = get_timer_id(arg1);
12925 
12926         if (timerid < 0) {
12927             ret = timerid;
12928         } else {
12929             timer_t htimer = g_posix_timers[timerid];
12930             ret = get_errno(timer_delete(htimer));
12931             g_posix_timers[timerid] = 0;
12932         }
12933         return ret;
12934     }
12935 #endif
12936 
12937 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12938     case TARGET_NR_timerfd_create:
12939         return get_errno(timerfd_create(arg1,
12940                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12941 #endif
12942 
12943 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12944     case TARGET_NR_timerfd_gettime:
12945         {
12946             struct itimerspec its_curr;
12947 
12948             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12949 
12950             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12951                 return -TARGET_EFAULT;
12952             }
12953         }
12954         return ret;
12955 #endif
12956 
12957 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12958     case TARGET_NR_timerfd_gettime64:
12959         {
12960             struct itimerspec its_curr;
12961 
12962             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12963 
12964             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12965                 return -TARGET_EFAULT;
12966             }
12967         }
12968         return ret;
12969 #endif
12970 
12971 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12972     case TARGET_NR_timerfd_settime:
12973         {
12974             struct itimerspec its_new, its_old, *p_new;
12975 
12976             if (arg3) {
12977                 if (target_to_host_itimerspec(&its_new, arg3)) {
12978                     return -TARGET_EFAULT;
12979                 }
12980                 p_new = &its_new;
12981             } else {
12982                 p_new = NULL;
12983             }
12984 
12985             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12986 
12987             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12988                 return -TARGET_EFAULT;
12989             }
12990         }
12991         return ret;
12992 #endif
12993 
12994 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12995     case TARGET_NR_timerfd_settime64:
12996         {
12997             struct itimerspec its_new, its_old, *p_new;
12998 
12999             if (arg3) {
13000                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13001                     return -TARGET_EFAULT;
13002                 }
13003                 p_new = &its_new;
13004             } else {
13005                 p_new = NULL;
13006             }
13007 
13008             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13009 
13010             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13011                 return -TARGET_EFAULT;
13012             }
13013         }
13014         return ret;
13015 #endif
13016 
13017 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13018     case TARGET_NR_ioprio_get:
13019         return get_errno(ioprio_get(arg1, arg2));
13020 #endif
13021 
13022 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13023     case TARGET_NR_ioprio_set:
13024         return get_errno(ioprio_set(arg1, arg2, arg3));
13025 #endif
13026 
13027 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13028     case TARGET_NR_setns:
13029         return get_errno(setns(arg1, arg2));
13030 #endif
13031 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13032     case TARGET_NR_unshare:
13033         return get_errno(unshare(arg1));
13034 #endif
13035 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13036     case TARGET_NR_kcmp:
13037         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13038 #endif
13039 #ifdef TARGET_NR_swapcontext
13040     case TARGET_NR_swapcontext:
13041         /* PowerPC specific.  */
13042         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13043 #endif
13044 #ifdef TARGET_NR_memfd_create
13045     case TARGET_NR_memfd_create:
13046         p = lock_user_string(arg1);
13047         if (!p) {
13048             return -TARGET_EFAULT;
13049         }
13050         ret = get_errno(memfd_create(p, arg2));
13051         fd_trans_unregister(ret);
13052         unlock_user(p, arg1, 0);
13053         return ret;
13054 #endif
13055 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13056     case TARGET_NR_membarrier:
13057         return get_errno(membarrier(arg1, arg2));
13058 #endif
13059 
13060 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13061     case TARGET_NR_copy_file_range:
13062         {
13063             loff_t inoff, outoff;
13064             loff_t *pinoff = NULL, *poutoff = NULL;
13065 
13066             if (arg2) {
13067                 if (get_user_u64(inoff, arg2)) {
13068                     return -TARGET_EFAULT;
13069                 }
13070                 pinoff = &inoff;
13071             }
13072             if (arg4) {
13073                 if (get_user_u64(outoff, arg4)) {
13074                     return -TARGET_EFAULT;
13075                 }
13076                 poutoff = &outoff;
13077             }
13078             /* Do not sign-extend the count parameter. */
13079             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13080                                                  (abi_ulong)arg5, arg6));
13081             if (!is_error(ret) && ret > 0) {
13082                 if (arg2) {
13083                     if (put_user_u64(inoff, arg2)) {
13084                         return -TARGET_EFAULT;
13085                     }
13086                 }
13087                 if (arg4) {
13088                     if (put_user_u64(outoff, arg4)) {
13089                         return -TARGET_EFAULT;
13090                     }
13091                 }
13092             }
13093         }
13094         return ret;
13095 #endif
13096 
13097 #if defined(TARGET_NR_pivot_root)
13098     case TARGET_NR_pivot_root:
13099         {
13100             void *p2;
13101             p = lock_user_string(arg1); /* new_root */
13102             p2 = lock_user_string(arg2); /* put_old */
13103             if (!p || !p2) {
13104                 ret = -TARGET_EFAULT;
13105             } else {
13106                 ret = get_errno(pivot_root(p, p2));
13107             }
13108             unlock_user(p2, arg2, 0);
13109             unlock_user(p, arg1, 0);
13110         }
13111         return ret;
13112 #endif
13113 
13114     default:
13115         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13116         return -TARGET_ENOSYS;
13117     }
13118     return ret;
13119 }
13120 
13121 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13122                     abi_long arg2, abi_long arg3, abi_long arg4,
13123                     abi_long arg5, abi_long arg6, abi_long arg7,
13124                     abi_long arg8)
13125 {
13126     CPUState *cpu = env_cpu(cpu_env);
13127     abi_long ret;
13128 
13129 #ifdef DEBUG_ERESTARTSYS
13130     /* Debug-only code for exercising the syscall-restart code paths
13131      * in the per-architecture cpu main loops: restart every syscall
13132      * the guest makes once before letting it through.
13133      */
13134     {
13135         static bool flag;
13136         flag = !flag;
13137         if (flag) {
13138             return -TARGET_ERESTARTSYS;
13139         }
13140     }
13141 #endif
13142 
13143     record_syscall_start(cpu, num, arg1,
13144                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13145 
13146     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13147         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13148     }
13149 
13150     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13151                       arg5, arg6, arg7, arg8);
13152 
13153     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13154         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13155                           arg3, arg4, arg5, arg6);
13156     }
13157 
13158     record_syscall_return(cpu, num, ret);
13159     return ret;
13160 }
13161