xref: /openbmc/qemu/linux-user/syscall.c (revision 3ffe3268)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
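/*
 * For example, glibc's pthread_create() typically passes
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, i.e. CLONE_THREAD_FLAGS plus a subset of the
 * optional thread flags defined below.
 */
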
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
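/*
 * As an illustration, the _syscall0(int, sys_gettid) declaration further
 * down expands to:
 *
 *     static int sys_gettid(void)
 *     {
 *         return syscall(__NR_sys_gettid);
 *     }
 *
 * with __NR_sys_gettid #defined to the host __NR_gettid below, so each
 * wrapper invokes the raw host syscall rather than any libc wrapper.
 */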
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we implement getdents using the host getdents if it has one.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
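/*
 * Open/fcntl flag translation table.  Each entry holds, in order, the
 * target mask, target bits, host mask and host bits; the
 * bitmask_transtbl helpers test (flags & mask) == bits on one side and
 * substitute the other side's bits, so e.g. a guest TARGET_O_RDWR value
 * within TARGET_O_ACCMODE is rewritten to the host O_RDWR.
 */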
368 static const bitmask_transtbl fcntl_flags_tbl[] = {
369   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
370   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
371   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
372   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
373   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
374   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
375   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
376   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
377   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
378   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
379   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
380   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
381   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
382 #if defined(O_DIRECT)
383   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
384 #endif
385 #if defined(O_NOATIME)
386   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
387 #endif
388 #if defined(O_CLOEXEC)
389   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
390 #endif
391 #if defined(O_PATH)
392   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
393 #endif
394 #if defined(O_TMPFILE)
395   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
396 #endif
397   /* Don't terminate the list prematurely on 64-bit host+guest.  */
398 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
399   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
400 #endif
401   { 0, 0, 0, 0 }
402 };
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
412 static int sys_utimensat(int dirfd, const char *pathname,
413                          const struct timespec times[2], int flags)
414 {
415     errno = ENOSYS;
416     return -1;
417 }
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
427 static int sys_renameat2(int oldfd, const char *old,
428                          int newfd, const char *new, int flags)
429 {
430     if (flags == 0) {
431         return renameat(oldfd, old, newfd, new);
432     }
433     errno = ENOSYS;
434     return -1;
435 }
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
443 static int sys_inotify_init(void)
444 {
445   return (inotify_init());
446 }
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
449 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
450 {
451   return (inotify_add_watch(fd, pathname, mask));
452 }
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
455 static int sys_inotify_rm_watch(int fd, int32_t wd)
456 {
457   return (inotify_rm_watch(fd, wd));
458 }
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
462 static int sys_inotify_init1(int flags)
463 {
464   return (inotify_init1(flags));
465 }
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive at runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not match the one used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
493 /* Maximum of 32 active POSIX timers allowed at any one time. */
494 static timer_t g_posix_timers[32] = { 0, } ;
495 
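/*
 * A free slot is claimed by storing a placeholder value, so concurrent
 * callers will skip it; the slot is presumably overwritten with the real
 * host timer_t once the timer_create() handling further down succeeds.
 */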
496 static inline int next_free_host_timer(void)
497 {
498     int k ;
499     /* FIXME: Does finding the next free slot require a lock? */
500     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
501         if (g_posix_timers[k] == 0) {
502             g_posix_timers[k] = (timer_t) 1;
503             return k;
504         }
505     }
506     return -1;
507 }
508 #endif
509 
510 #define ERRNO_TABLE_SIZE 1200
511 
512 static inline int host_to_target_errno(int host_errno)
513 {
514     switch (host_errno) {
515 #define E(X)  case X: return TARGET_##X;
516 #include "errnos.c.inc"
517 #undef E
518     default:
519         return host_errno;
520     }
521 }
522 
523 static inline int target_to_host_errno(int target_errno)
524 {
525     switch (target_errno) {
526 #define E(X)  case TARGET_##X: return X;
527 #include "errnos.c.inc"
528 #undef E
529     default:
530         return target_errno;
531     }
532 }
533 
534 static inline abi_long get_errno(abi_long ret)
535 {
536     if (ret == -1)
537         return -host_to_target_errno(errno);
538     else
539         return ret;
540 }
541 
542 const char *target_strerror(int err)
543 {
544     if (err == TARGET_ERESTARTSYS) {
545         return "To be restarted";
546     }
547     if (err == TARGET_QEMU_ESIGRETURN) {
548         return "Successful exit from sigreturn";
549     }
550 
551     if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
552         return NULL;
553     }
554     return strerror(target_to_host_errno(err));
555 }
556 
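/*
 * Wrappers for blocking host syscalls.  safe_syscall() (defined
 * elsewhere in the tree) is intended to only enter the host syscall
 * while no guest signal is pending, so that a blocked guest syscall can
 * be interrupted and restarted; blocking calls below therefore go
 * through these safe_xxx() wrappers instead of the libc functions.
 */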
557 #define safe_syscall0(type, name) \
558 static type safe_##name(void) \
559 { \
560     return safe_syscall(__NR_##name); \
561 }
562 
563 #define safe_syscall1(type, name, type1, arg1) \
564 static type safe_##name(type1 arg1) \
565 { \
566     return safe_syscall(__NR_##name, arg1); \
567 }
568 
569 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
570 static type safe_##name(type1 arg1, type2 arg2) \
571 { \
572     return safe_syscall(__NR_##name, arg1, arg2); \
573 }
574 
575 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
576 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
577 { \
578     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
579 }
580 
581 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
582     type4, arg4) \
583 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
584 { \
585     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
586 }
587 
588 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
589     type4, arg4, type5, arg5) \
590 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
591     type5 arg5) \
592 { \
593     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
594 }
595 
596 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
597     type4, arg4, type5, arg5, type6, arg6) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
599     type5 arg5, type6 arg6) \
600 { \
601     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
602 }
603 
604 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
605 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
606 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
607               int, flags, mode_t, mode)
608 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
609 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
610               struct rusage *, rusage)
611 #endif
612 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
613               int, options, struct rusage *, rusage)
614 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
615 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
616     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
617 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
618               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
619 #endif
620 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
621 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
622               struct timespec *, tsp, const sigset_t *, sigmask,
623               size_t, sigsetsize)
624 #endif
625 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
626               int, maxevents, int, timeout, const sigset_t *, sigmask,
627               size_t, sigsetsize)
628 #if defined(__NR_futex)
629 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
630               const struct timespec *,timeout,int *,uaddr2,int,val3)
631 #endif
632 #if defined(__NR_futex_time64)
633 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
634               const struct timespec *,timeout,int *,uaddr2,int,val3)
635 #endif
636 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
637 safe_syscall2(int, kill, pid_t, pid, int, sig)
638 safe_syscall2(int, tkill, int, tid, int, sig)
639 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
640 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
641 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
642 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
643               unsigned long, pos_l, unsigned long, pos_h)
644 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
645               unsigned long, pos_l, unsigned long, pos_h)
646 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
647               socklen_t, addrlen)
648 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
649               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
650 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
651               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
652 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
653 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
654 safe_syscall2(int, flock, int, fd, int, operation)
655 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
656 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
657               const struct timespec *, uts, size_t, sigsetsize)
658 #endif
659 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
660               int, flags)
661 #if defined(TARGET_NR_nanosleep)
662 safe_syscall2(int, nanosleep, const struct timespec *, req,
663               struct timespec *, rem)
664 #endif
665 #if defined(TARGET_NR_clock_nanosleep) || \
666     defined(TARGET_NR_clock_nanosleep_time64)
667 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
668               const struct timespec *, req, struct timespec *, rem)
669 #endif
670 #ifdef __NR_ipc
671 #ifdef __s390x__
672 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
673               void *, ptr)
674 #else
675 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
676               void *, ptr, long, fifth)
677 #endif
678 #endif
679 #ifdef __NR_msgsnd
680 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
681               int, flags)
682 #endif
683 #ifdef __NR_msgrcv
684 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
685               long, msgtype, int, flags)
686 #endif
687 #ifdef __NR_semtimedop
688 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
689               unsigned, nsops, const struct timespec *, timeout)
690 #endif
691 #if defined(TARGET_NR_mq_timedsend) || \
692     defined(TARGET_NR_mq_timedsend_time64)
693 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
694               size_t, len, unsigned, prio, const struct timespec *, timeout)
695 #endif
696 #if defined(TARGET_NR_mq_timedreceive) || \
697     defined(TARGET_NR_mq_timedreceive_time64)
698 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
699               size_t, len, unsigned *, prio, const struct timespec *, timeout)
700 #endif
701 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
702 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
703               int, outfd, loff_t *, poutoff, size_t, length,
704               unsigned int, flags)
705 #endif
706 
707 /* We do ioctl like this rather than via safe_syscall3 to preserve the
708  * "third argument might be integer or pointer or not present" behaviour of
709  * the libc function.
710  */
711 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
712 /* Similarly for fcntl. Note that callers must always:
713  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
714  *  - use the flock64 struct rather than the unsuffixed flock
715  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
716  */
717 #ifdef __NR_fcntl64
718 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
719 #else
720 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
721 #endif
722 
723 static inline int host_to_target_sock_type(int host_type)
724 {
725     int target_type;
726 
727     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
728     case SOCK_DGRAM:
729         target_type = TARGET_SOCK_DGRAM;
730         break;
731     case SOCK_STREAM:
732         target_type = TARGET_SOCK_STREAM;
733         break;
734     default:
735         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
736         break;
737     }
738 
739 #if defined(SOCK_CLOEXEC)
740     if (host_type & SOCK_CLOEXEC) {
741         target_type |= TARGET_SOCK_CLOEXEC;
742     }
743 #endif
744 
745 #if defined(SOCK_NONBLOCK)
746     if (host_type & SOCK_NONBLOCK) {
747         target_type |= TARGET_SOCK_NONBLOCK;
748     }
749 #endif
750 
751     return target_type;
752 }
753 
754 static abi_ulong target_brk;
755 static abi_ulong target_original_brk;
756 static abi_ulong brk_page;
757 
758 void target_set_brk(abi_ulong new_brk)
759 {
760     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
761     brk_page = HOST_PAGE_ALIGN(target_brk);
762 }
763 
764 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
765 #define DEBUGF_BRK(message, args...)
766 
767 /* do_brk() must return target values and target errnos. */
768 abi_long do_brk(abi_ulong new_brk)
769 {
770     abi_long mapped_addr;
771     abi_ulong new_alloc_size;
772 
773     /* brk pointers are always untagged */
774 
775     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
776 
777     if (!new_brk) {
778         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
779         return target_brk;
780     }
781     if (new_brk < target_original_brk) {
782         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
783                    target_brk);
784         return target_brk;
785     }
786 
787     /* If the new brk is less than the highest page reserved to the
788      * target heap allocation, set it and we're almost done...  */
789     if (new_brk <= brk_page) {
790         /* Heap contents are initialized to zero, as for anonymous
791          * mapped pages.  */
792         if (new_brk > target_brk) {
793             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
794         }
795         target_brk = new_brk;
796         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
797         return target_brk;
798     }
799 
800     /* We need to allocate more memory after the brk... Note that
801      * we don't use MAP_FIXED because that will map over the top of
802      * any existing mapping (like the one with the host libc or qemu
803      * itself); instead we treat "mapped but at wrong address" as
804      * a failure and unmap again.
805      */
806     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
807     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
808                                         PROT_READ|PROT_WRITE,
809                                         MAP_ANON|MAP_PRIVATE, 0, 0));
810 
811     if (mapped_addr == brk_page) {
812         /* Heap contents are initialized to zero, as for anonymous
813          * mapped pages.  Technically the new pages are already
814          * initialized to zero since they *are* anonymous mapped
815  * pages; however, we have to take care with the contents that
816  * come from the remaining part of the previous page: it may
817  * contain garbage data left over from previous heap usage (grown
818  * then shrunk).  */
819         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
820 
821         target_brk = new_brk;
822         brk_page = HOST_PAGE_ALIGN(target_brk);
823         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
824             target_brk);
825         return target_brk;
826     } else if (mapped_addr != -1) {
827         /* Mapped but at wrong address, meaning there wasn't actually
828          * enough space for this brk.
829          */
830         target_munmap(mapped_addr, new_alloc_size);
831         mapped_addr = -1;
832         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
833     }
834     else {
835         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
836     }
837 
838 #if defined(TARGET_ALPHA)
839     /* We (partially) emulate OSF/1 on Alpha, which requires we
840        return a proper errno, not an unchanged brk value.  */
841     return -TARGET_ENOMEM;
842 #endif
843     /* For everything else, return the previous break. */
844     return target_brk;
845 }
846 
847 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
848     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
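/*
 * A guest fd_set is an array of abi_ulong words holding TARGET_ABI_BITS
 * descriptors each.  For example, with a 32-bit ABI a 70-fd set spans
 * DIV_ROUND_UP(70, 32) = 3 words, and fd 33 is bit 1 of word 1; the
 * helpers below translate that layout to and from the host fd_set.
 */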
849 static inline abi_long copy_from_user_fdset(fd_set *fds,
850                                             abi_ulong target_fds_addr,
851                                             int n)
852 {
853     int i, nw, j, k;
854     abi_ulong b, *target_fds;
855 
856     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
857     if (!(target_fds = lock_user(VERIFY_READ,
858                                  target_fds_addr,
859                                  sizeof(abi_ulong) * nw,
860                                  1)))
861         return -TARGET_EFAULT;
862 
863     FD_ZERO(fds);
864     k = 0;
865     for (i = 0; i < nw; i++) {
866         /* grab the abi_ulong */
867         __get_user(b, &target_fds[i]);
868         for (j = 0; j < TARGET_ABI_BITS; j++) {
869             /* check the bit inside the abi_ulong */
870             if ((b >> j) & 1)
871                 FD_SET(k, fds);
872             k++;
873         }
874     }
875 
876     unlock_user(target_fds, target_fds_addr, 0);
877 
878     return 0;
879 }
880 
881 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
882                                                  abi_ulong target_fds_addr,
883                                                  int n)
884 {
885     if (target_fds_addr) {
886         if (copy_from_user_fdset(fds, target_fds_addr, n))
887             return -TARGET_EFAULT;
888         *fds_ptr = fds;
889     } else {
890         *fds_ptr = NULL;
891     }
892     return 0;
893 }
894 
895 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
896                                           const fd_set *fds,
897                                           int n)
898 {
899     int i, nw, j, k;
900     abi_long v;
901     abi_ulong *target_fds;
902 
903     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
904     if (!(target_fds = lock_user(VERIFY_WRITE,
905                                  target_fds_addr,
906                                  sizeof(abi_ulong) * nw,
907                                  0)))
908         return -TARGET_EFAULT;
909 
910     k = 0;
911     for (i = 0; i < nw; i++) {
912         v = 0;
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
915             k++;
916         }
917         __put_user(v, &target_fds[i]);
918     }
919 
920     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
921 
922     return 0;
923 }
924 #endif
925 
926 #if defined(__alpha__)
927 #define HOST_HZ 1024
928 #else
929 #define HOST_HZ 100
930 #endif
931 
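/*
 * Rescale a clock_t from host ticks (HOST_HZ) to guest ticks
 * (TARGET_HZ), e.g. 512 host ticks at HOST_HZ 1024 become 50 guest
 * ticks at a TARGET_HZ of 100.
 */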
932 static inline abi_long host_to_target_clock_t(long ticks)
933 {
934 #if HOST_HZ == TARGET_HZ
935     return ticks;
936 #else
937     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
938 #endif
939 }
940 
941 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
942                                              const struct rusage *rusage)
943 {
944     struct target_rusage *target_rusage;
945 
946     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
947         return -TARGET_EFAULT;
948     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
949     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
950     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
951     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
952     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
953     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
954     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
955     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
956     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
957     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
958     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
959     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
960     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
961     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
962     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
963     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
964     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
965     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
966     unlock_user_struct(target_rusage, target_addr, 1);
967 
968     return 0;
969 }
970 
971 #ifdef TARGET_NR_setrlimit
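/*
 * Convert a guest rlimit value to the host rlim_t: TARGET_RLIM_INFINITY
 * maps to RLIM_INFINITY, and any value that does not fit into rlim_t is
 * also treated as unlimited rather than silently truncated.
 */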
972 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
973 {
974     abi_ulong target_rlim_swap;
975     rlim_t result;
976 
977     target_rlim_swap = tswapal(target_rlim);
978     if (target_rlim_swap == TARGET_RLIM_INFINITY)
979         return RLIM_INFINITY;
980 
981     result = target_rlim_swap;
982     if (target_rlim_swap != (rlim_t)result)
983         return RLIM_INFINITY;
984 
985     return result;
986 }
987 #endif
988 
989 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
990 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
991 {
992     abi_ulong target_rlim_swap;
993     abi_ulong result;
994 
995     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
996         target_rlim_swap = TARGET_RLIM_INFINITY;
997     else
998         target_rlim_swap = rlim;
999     result = tswapal(target_rlim_swap);
1000 
1001     return result;
1002 }
1003 #endif
1004 
1005 static inline int target_to_host_resource(int code)
1006 {
1007     switch (code) {
1008     case TARGET_RLIMIT_AS:
1009         return RLIMIT_AS;
1010     case TARGET_RLIMIT_CORE:
1011         return RLIMIT_CORE;
1012     case TARGET_RLIMIT_CPU:
1013         return RLIMIT_CPU;
1014     case TARGET_RLIMIT_DATA:
1015         return RLIMIT_DATA;
1016     case TARGET_RLIMIT_FSIZE:
1017         return RLIMIT_FSIZE;
1018     case TARGET_RLIMIT_LOCKS:
1019         return RLIMIT_LOCKS;
1020     case TARGET_RLIMIT_MEMLOCK:
1021         return RLIMIT_MEMLOCK;
1022     case TARGET_RLIMIT_MSGQUEUE:
1023         return RLIMIT_MSGQUEUE;
1024     case TARGET_RLIMIT_NICE:
1025         return RLIMIT_NICE;
1026     case TARGET_RLIMIT_NOFILE:
1027         return RLIMIT_NOFILE;
1028     case TARGET_RLIMIT_NPROC:
1029         return RLIMIT_NPROC;
1030     case TARGET_RLIMIT_RSS:
1031         return RLIMIT_RSS;
1032     case TARGET_RLIMIT_RTPRIO:
1033         return RLIMIT_RTPRIO;
1034     case TARGET_RLIMIT_SIGPENDING:
1035         return RLIMIT_SIGPENDING;
1036     case TARGET_RLIMIT_STACK:
1037         return RLIMIT_STACK;
1038     default:
1039         return code;
1040     }
1041 }
1042 
1043 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1044                                               abi_ulong target_tv_addr)
1045 {
1046     struct target_timeval *target_tv;
1047 
1048     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1049         return -TARGET_EFAULT;
1050     }
1051 
1052     __get_user(tv->tv_sec, &target_tv->tv_sec);
1053     __get_user(tv->tv_usec, &target_tv->tv_usec);
1054 
1055     unlock_user_struct(target_tv, target_tv_addr, 0);
1056 
1057     return 0;
1058 }
1059 
1060 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1061                                             const struct timeval *tv)
1062 {
1063     struct target_timeval *target_tv;
1064 
1065     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1066         return -TARGET_EFAULT;
1067     }
1068 
1069     __put_user(tv->tv_sec, &target_tv->tv_sec);
1070     __put_user(tv->tv_usec, &target_tv->tv_usec);
1071 
1072     unlock_user_struct(target_tv, target_tv_addr, 1);
1073 
1074     return 0;
1075 }
1076 
1077 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1078 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1079                                                 abi_ulong target_tv_addr)
1080 {
1081     struct target__kernel_sock_timeval *target_tv;
1082 
1083     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1084         return -TARGET_EFAULT;
1085     }
1086 
1087     __get_user(tv->tv_sec, &target_tv->tv_sec);
1088     __get_user(tv->tv_usec, &target_tv->tv_usec);
1089 
1090     unlock_user_struct(target_tv, target_tv_addr, 0);
1091 
1092     return 0;
1093 }
1094 #endif
1095 
1096 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1097                                               const struct timeval *tv)
1098 {
1099     struct target__kernel_sock_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __put_user(tv->tv_sec, &target_tv->tv_sec);
1106     __put_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 1);
1109 
1110     return 0;
1111 }
1112 
1113 #if defined(TARGET_NR_futex) || \
1114     defined(TARGET_NR_rt_sigtimedwait) || \
1115     defined(TARGET_NR_pselect6) || \
1116     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1117     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1118     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1119     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1120     defined(TARGET_NR_timer_settime) || \
1121     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1122 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1123                                                abi_ulong target_addr)
1124 {
1125     struct target_timespec *target_ts;
1126 
1127     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1128         return -TARGET_EFAULT;
1129     }
1130     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1131     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1132     unlock_user_struct(target_ts, target_addr, 0);
1133     return 0;
1134 }
1135 #endif
1136 
1137 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1138     defined(TARGET_NR_timer_settime64) || \
1139     defined(TARGET_NR_mq_timedsend_time64) || \
1140     defined(TARGET_NR_mq_timedreceive_time64) || \
1141     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1142     defined(TARGET_NR_clock_nanosleep_time64) || \
1143     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1144     defined(TARGET_NR_utimensat) || \
1145     defined(TARGET_NR_utimensat_time64) || \
1146     defined(TARGET_NR_semtimedop_time64) || \
1147     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1148 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1149                                                  abi_ulong target_addr)
1150 {
1151     struct target__kernel_timespec *target_ts;
1152 
1153     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1154         return -TARGET_EFAULT;
1155     }
1156     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1157     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1158     /* In 32-bit mode, this drops the padding bits of tv_nsec. */
1159     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1160     unlock_user_struct(target_ts, target_addr, 0);
1161     return 0;
1162 }
1163 #endif
1164 
1165 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1166                                                struct timespec *host_ts)
1167 {
1168     struct target_timespec *target_ts;
1169 
1170     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1171         return -TARGET_EFAULT;
1172     }
1173     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1174     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1175     unlock_user_struct(target_ts, target_addr, 1);
1176     return 0;
1177 }
1178 
1179 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1180                                                  struct timespec *host_ts)
1181 {
1182     struct target__kernel_timespec *target_ts;
1183 
1184     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1185         return -TARGET_EFAULT;
1186     }
1187     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1188     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189     unlock_user_struct(target_ts, target_addr, 1);
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_gettimeofday)
1194 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1195                                              struct timezone *tz)
1196 {
1197     struct target_timezone *target_tz;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1204     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1205 
1206     unlock_user_struct(target_tz, target_tz_addr, 1);
1207 
1208     return 0;
1209 }
1210 #endif
1211 
1212 #if defined(TARGET_NR_settimeofday)
1213 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1214                                                abi_ulong target_tz_addr)
1215 {
1216     struct target_timezone *target_tz;
1217 
1218     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1219         return -TARGET_EFAULT;
1220     }
1221 
1222     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1223     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1224 
1225     unlock_user_struct(target_tz, target_tz_addr, 0);
1226 
1227     return 0;
1228 }
1229 #endif
1230 
1231 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1232 #include <mqueue.h>
1233 
1234 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1235                                               abi_ulong target_mq_attr_addr)
1236 {
1237     struct target_mq_attr *target_mq_attr;
1238 
1239     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1240                           target_mq_attr_addr, 1))
1241         return -TARGET_EFAULT;
1242 
1243     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1244     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1245     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1246     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1247 
1248     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1249 
1250     return 0;
1251 }
1252 
1253 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1254                                             const struct mq_attr *attr)
1255 {
1256     struct target_mq_attr *target_mq_attr;
1257 
1258     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1259                           target_mq_attr_addr, 0))
1260         return -TARGET_EFAULT;
1261 
1262     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1263     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1264     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1265     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1266 
1267     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1268 
1269     return 0;
1270 }
1271 #endif
1272 
1273 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1274 /* do_select() must return target values and target errnos. */
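/*
 * The guest timeval is converted to a timespec so the host side can
 * always go through safe_pselect6(); any remaining time reported back
 * by the host is converted again before being copied out to the guest.
 */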
1275 static abi_long do_select(int n,
1276                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1277                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1278 {
1279     fd_set rfds, wfds, efds;
1280     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1281     struct timeval tv;
1282     struct timespec ts, *ts_ptr;
1283     abi_long ret;
1284 
1285     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1286     if (ret) {
1287         return ret;
1288     }
1289     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1290     if (ret) {
1291         return ret;
1292     }
1293     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297 
1298     if (target_tv_addr) {
1299         if (copy_from_user_timeval(&tv, target_tv_addr))
1300             return -TARGET_EFAULT;
1301         ts.tv_sec = tv.tv_sec;
1302         ts.tv_nsec = tv.tv_usec * 1000;
1303         ts_ptr = &ts;
1304     } else {
1305         ts_ptr = NULL;
1306     }
1307 
1308     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1309                                   ts_ptr, NULL));
1310 
1311     if (!is_error(ret)) {
1312         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1313             return -TARGET_EFAULT;
1314         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1315             return -TARGET_EFAULT;
1316         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1317             return -TARGET_EFAULT;
1318 
1319         if (target_tv_addr) {
1320             tv.tv_sec = ts.tv_sec;
1321             tv.tv_usec = ts.tv_nsec / 1000;
1322             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1323                 return -TARGET_EFAULT;
1324             }
1325         }
1326     }
1327 
1328     return ret;
1329 }
1330 
1331 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1332 static abi_long do_old_select(abi_ulong arg1)
1333 {
1334     struct target_sel_arg_struct *sel;
1335     abi_ulong inp, outp, exp, tvp;
1336     long nsel;
1337 
1338     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1339         return -TARGET_EFAULT;
1340     }
1341 
1342     nsel = tswapal(sel->n);
1343     inp = tswapal(sel->inp);
1344     outp = tswapal(sel->outp);
1345     exp = tswapal(sel->exp);
1346     tvp = tswapal(sel->tvp);
1347 
1348     unlock_user_struct(sel, arg1, 0);
1349 
1350     return do_select(nsel, inp, outp, exp, tvp);
1351 }
1352 #endif
1353 #endif
1354 
1355 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1356 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1357                             abi_long arg4, abi_long arg5, abi_long arg6,
1358                             bool time64)
1359 {
1360     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1361     fd_set rfds, wfds, efds;
1362     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1363     struct timespec ts, *ts_ptr;
1364     abi_long ret;
1365 
1366     /*
1367      * The 6th arg is actually two args smashed together,
1368      * so we cannot use the C library.
1369      */
1370     sigset_t set;
1371     struct {
1372         sigset_t *set;
1373         size_t size;
1374     } sig, *sig_ptr;
1375 
1376     abi_ulong arg_sigset, arg_sigsize, *arg7;
1377     target_sigset_t *target_sigset;
1378 
1379     n = arg1;
1380     rfd_addr = arg2;
1381     wfd_addr = arg3;
1382     efd_addr = arg4;
1383     ts_addr = arg5;
1384 
1385     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1386     if (ret) {
1387         return ret;
1388     }
1389     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397 
1398     /*
1399      * This takes a timespec, and not a timeval, so we cannot
1400      * use the do_select() helper ...
1401      */
1402     if (ts_addr) {
1403         if (time64) {
1404             if (target_to_host_timespec64(&ts, ts_addr)) {
1405                 return -TARGET_EFAULT;
1406             }
1407         } else {
1408             if (target_to_host_timespec(&ts, ts_addr)) {
1409                 return -TARGET_EFAULT;
1410             }
1411         }
1412         ts_ptr = &ts;
1413     } else {
1414         ts_ptr = NULL;
1415     }
1416 
1417     /* Extract the two packed args for the sigset */
1418     if (arg6) {
1419         sig_ptr = &sig;
1420         sig.size = SIGSET_T_SIZE;
1421 
1422         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1423         if (!arg7) {
1424             return -TARGET_EFAULT;
1425         }
1426         arg_sigset = tswapal(arg7[0]);
1427         arg_sigsize = tswapal(arg7[1]);
1428         unlock_user(arg7, arg6, 0);
1429 
1430         if (arg_sigset) {
1431             sig.set = &set;
1432             if (arg_sigsize != sizeof(*target_sigset)) {
1433                 /* Like the kernel, we enforce correct size sigsets */
1434                 return -TARGET_EINVAL;
1435             }
1436             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1437                                       sizeof(*target_sigset), 1);
1438             if (!target_sigset) {
1439                 return -TARGET_EFAULT;
1440             }
1441             target_to_host_sigset(&set, target_sigset);
1442             unlock_user(target_sigset, arg_sigset, 0);
1443         } else {
1444             sig.set = NULL;
1445         }
1446     } else {
1447         sig_ptr = NULL;
1448     }
1449 
1450     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1451                                   ts_ptr, sig_ptr));
1452 
1453     if (!is_error(ret)) {
1454         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1455             return -TARGET_EFAULT;
1456         }
1457         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1458             return -TARGET_EFAULT;
1459         }
1460         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1461             return -TARGET_EFAULT;
1462         }
1463         if (time64) {
1464             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1465                 return -TARGET_EFAULT;
1466             }
1467         } else {
1468             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1469                 return -TARGET_EFAULT;
1470             }
1471         }
1472     }
1473     return ret;
1474 }
1475 #endif
1476 
1477 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1478     defined(TARGET_NR_ppoll_time64)
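/*
 * Common handler for poll(), ppoll() and ppoll_time64().  A plain
 * poll() timeout in milliseconds is converted to a timespec and
 * funnelled through the same safe_ppoll() call, with a negative
 * timeout meaning "wait forever".
 */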
1479 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1480                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1481 {
1482     struct target_pollfd *target_pfd;
1483     unsigned int nfds = arg2;
1484     struct pollfd *pfd;
1485     unsigned int i;
1486     abi_long ret;
1487 
1488     pfd = NULL;
1489     target_pfd = NULL;
1490     if (nfds) {
1491         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1492             return -TARGET_EINVAL;
1493         }
1494         target_pfd = lock_user(VERIFY_WRITE, arg1,
1495                                sizeof(struct target_pollfd) * nfds, 1);
1496         if (!target_pfd) {
1497             return -TARGET_EFAULT;
1498         }
1499 
1500         pfd = alloca(sizeof(struct pollfd) * nfds);
1501         for (i = 0; i < nfds; i++) {
1502             pfd[i].fd = tswap32(target_pfd[i].fd);
1503             pfd[i].events = tswap16(target_pfd[i].events);
1504         }
1505     }
1506     if (ppoll) {
1507         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1508         target_sigset_t *target_set;
1509         sigset_t _set, *set = &_set;
1510 
1511         if (arg3) {
1512             if (time64) {
1513                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1514                     unlock_user(target_pfd, arg1, 0);
1515                     return -TARGET_EFAULT;
1516                 }
1517             } else {
1518                 if (target_to_host_timespec(timeout_ts, arg3)) {
1519                     unlock_user(target_pfd, arg1, 0);
1520                     return -TARGET_EFAULT;
1521                 }
1522             }
1523         } else {
1524             timeout_ts = NULL;
1525         }
1526 
1527         if (arg4) {
1528             if (arg5 != sizeof(target_sigset_t)) {
1529                 unlock_user(target_pfd, arg1, 0);
1530                 return -TARGET_EINVAL;
1531             }
1532 
1533             target_set = lock_user(VERIFY_READ, arg4,
1534                                    sizeof(target_sigset_t), 1);
1535             if (!target_set) {
1536                 unlock_user(target_pfd, arg1, 0);
1537                 return -TARGET_EFAULT;
1538             }
1539             target_to_host_sigset(set, target_set);
1540         } else {
1541             set = NULL;
1542         }
1543 
1544         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1545                                    set, SIGSET_T_SIZE));
1546 
1547         if (!is_error(ret) && arg3) {
1548             if (time64) {
1549                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1550                     return -TARGET_EFAULT;
1551                 }
1552             } else {
1553                 if (host_to_target_timespec(arg3, timeout_ts)) {
1554                     return -TARGET_EFAULT;
1555                 }
1556             }
1557         }
1558         if (arg4) {
1559             unlock_user(target_set, arg4, 0);
1560         }
1561     } else {
1562         struct timespec ts, *pts;
1563 
1564         if (arg3 >= 0) {
1565             /* Convert ms to secs, ns */
1566             ts.tv_sec = arg3 / 1000;
1567             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1568             pts = &ts;
1569         } else {
1570             /* A negative poll() timeout means "infinite" */
1571             pts = NULL;
1572         }
1573         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1574     }
1575 
1576     if (!is_error(ret)) {
1577         for (i = 0; i < nfds; i++) {
1578             target_pfd[i].revents = tswap16(pfd[i].revents);
1579         }
1580     }
1581     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1582     return ret;
1583 }
1584 #endif
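
/*
 * Worked example for the plain poll() path in do_ppoll() above (a
 * sketch with made-up values, not code that is compiled here): a guest
 * timeout of arg3 = 2500 ms becomes
 *
 *     ts.tv_sec  = 2500 / 1000;                 -> 2 seconds
 *     ts.tv_nsec = (2500 % 1000) * 1000000LL;   -> 500000000 ns
 *
 * and pts points at ts, while a negative arg3 leaves pts NULL so that
 * safe_ppoll() blocks indefinitely, matching poll(2) semantics.
 */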
1585 
1586 static abi_long do_pipe2(int host_pipe[], int flags)
1587 {
1588 #ifdef CONFIG_PIPE2
1589     return pipe2(host_pipe, flags);
1590 #else
1591     return -ENOSYS;
1592 #endif
1593 }
1594 
1595 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1596                         int flags, int is_pipe2)
1597 {
1598     int host_pipe[2];
1599     abi_long ret;
1600     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1601 
1602     if (is_error(ret))
1603         return get_errno(ret);
1604 
1605     /* Several targets have special calling conventions for the original
1606        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1607     if (!is_pipe2) {
1608 #if defined(TARGET_ALPHA)
1609         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1610         return host_pipe[0];
1611 #elif defined(TARGET_MIPS)
1612         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1613         return host_pipe[0];
1614 #elif defined(TARGET_SH4)
1615         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1616         return host_pipe[0];
1617 #elif defined(TARGET_SPARC)
1618         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1619         return host_pipe[0];
1620 #endif
1621     }
1622 
1623     if (put_user_s32(host_pipe[0], pipedes)
1624         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1625         return -TARGET_EFAULT;
1626     return get_errno(ret);
1627 }
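
/*
 * Sketch of the two guest-visible conventions handled in do_pipe()
 * above (register names are taken from the target cases in the code;
 * values are illustrative): on most targets both descriptors are
 * stored through the pipedes pointer,
 *
 *     put_user_s32(host_pipe[0], pipedes);                    fd[0]
 *     put_user_s32(host_pipe[1], pipedes + sizeof(int));      fd[1]
 *
 * whereas e.g. MIPS returns fd[0] as the syscall result and places
 * fd[1] in the second result register (active_tc.gpr[3]), leaving
 * pipedes untouched for the original pipe syscall.
 */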
1628 
1629 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1630                                               abi_ulong target_addr,
1631                                               socklen_t len)
1632 {
1633     struct target_ip_mreqn *target_smreqn;
1634 
1635     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_smreqn)
1637         return -TARGET_EFAULT;
1638     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1639     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1640     if (len == sizeof(struct target_ip_mreqn))
1641         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1642     unlock_user(target_smreqn, target_addr, 0);
1643 
1644     return 0;
1645 }
1646 
1647 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1648                                                abi_ulong target_addr,
1649                                                socklen_t len)
1650 {
1651     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1652     sa_family_t sa_family;
1653     struct target_sockaddr *target_saddr;
1654 
1655     if (fd_trans_target_to_host_addr(fd)) {
1656         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1657     }
1658 
1659     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_saddr)
1661         return -TARGET_EFAULT;
1662 
1663     sa_family = tswap16(target_saddr->sa_family);
1664 
1665     /* Oops. The caller might send an incomplete sun_path; sun_path
1666      * must be terminated by \0 (see the manual page), but
1667      * unfortunately it is quite common to specify the sockaddr_un
1668      * length as "strlen(x->sun_path)" when it should be
1669      * "strlen(...) + 1". We fix that up here if needed; the Linux
1670      * kernel is similarly lenient. A worked example follows this function.
1671      */
1672 
1673     if (sa_family == AF_UNIX) {
1674         if (len < unix_maxlen && len > 0) {
1675             char *cp = (char *)target_saddr;
1676 
1677             if (cp[len - 1] && !cp[len])
1678                 len++;
1679         }
1680         if (len > unix_maxlen)
1681             len = unix_maxlen;
1682     }
1683 
1684     memcpy(addr, target_saddr, len);
1685     addr->sa_family = sa_family;
1686     if (sa_family == AF_NETLINK) {
1687         struct sockaddr_nl *nladdr;
1688 
1689         nladdr = (struct sockaddr_nl *)addr;
1690         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1691         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1692     } else if (sa_family == AF_PACKET) {
1693         struct target_sockaddr_ll *lladdr;
1694 
1695         lladdr = (struct target_sockaddr_ll *)addr;
1696         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1697         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1698     }
1699     unlock_user(target_saddr, target_addr, 0);
1700 
1701     return 0;
1702 }
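
/*
 * Worked example for the AF_UNIX length fixup in
 * target_to_host_sockaddr() above (path and length are illustrative):
 * a guest that binds "/tmp/sock" but passes
 *
 *     len = offsetof(struct sockaddr_un, sun_path) + strlen("/tmp/sock");
 *
 * omits the terminating NUL from the stated length.  Because
 * cp[len - 1] is 'k' and cp[len] is the '\0' that follows the path in
 * guest memory, len is bumped by one so the host sockaddr_un keeps a
 * properly terminated sun_path.
 */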
1703 
1704 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1705                                                struct sockaddr *addr,
1706                                                socklen_t len)
1707 {
1708     struct target_sockaddr *target_saddr;
1709 
1710     if (len == 0) {
1711         return 0;
1712     }
1713     assert(addr);
1714 
1715     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1716     if (!target_saddr)
1717         return -TARGET_EFAULT;
1718     memcpy(target_saddr, addr, len);
1719     if (len >= offsetof(struct target_sockaddr, sa_family) +
1720         sizeof(target_saddr->sa_family)) {
1721         target_saddr->sa_family = tswap16(addr->sa_family);
1722     }
1723     if (addr->sa_family == AF_NETLINK &&
1724         len >= sizeof(struct target_sockaddr_nl)) {
1725         struct target_sockaddr_nl *target_nl =
1726                (struct target_sockaddr_nl *)target_saddr;
1727         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1728         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1729     } else if (addr->sa_family == AF_PACKET) {
1730         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1731         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1732         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1733     } else if (addr->sa_family == AF_INET6 &&
1734                len >= sizeof(struct target_sockaddr_in6)) {
1735         struct target_sockaddr_in6 *target_in6 =
1736                (struct target_sockaddr_in6 *)target_saddr;
1737         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1738     }
1739     unlock_user(target_saddr, target_addr, len);
1740 
1741     return 0;
1742 }
1743 
1744 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1745                                            struct target_msghdr *target_msgh)
1746 {
1747     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1748     abi_long msg_controllen;
1749     abi_ulong target_cmsg_addr;
1750     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1751     socklen_t space = 0;
1752 
1753     msg_controllen = tswapal(target_msgh->msg_controllen);
1754     if (msg_controllen < sizeof (struct target_cmsghdr))
1755         goto the_end;
1756     target_cmsg_addr = tswapal(target_msgh->msg_control);
1757     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1758     target_cmsg_start = target_cmsg;
1759     if (!target_cmsg)
1760         return -TARGET_EFAULT;
1761 
1762     while (cmsg && target_cmsg) {
1763         void *data = CMSG_DATA(cmsg);
1764         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1765 
1766         int len = tswapal(target_cmsg->cmsg_len)
1767             - sizeof(struct target_cmsghdr);
1768 
1769         space += CMSG_SPACE(len);
1770         if (space > msgh->msg_controllen) {
1771             space -= CMSG_SPACE(len);
1772             /* This is a QEMU bug, since we allocated the payload
1773              * area ourselves (unlike overflow in host-to-target
1774              * conversion, which is just the guest giving us a buffer
1775              * that's too small). It can't happen for the payload types
1776              * we currently support; if it becomes an issue in future
1777              * we would need to improve our allocation strategy to
1778              * something more intelligent than "twice the size of the
1779              * target buffer we're reading from".
1780              */
1781             qemu_log_mask(LOG_UNIMP,
1782                           ("Unsupported ancillary data %d/%d: "
1783                            "unhandled msg size\n"),
1784                           tswap32(target_cmsg->cmsg_level),
1785                           tswap32(target_cmsg->cmsg_type));
1786             break;
1787         }
1788 
1789         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1790             cmsg->cmsg_level = SOL_SOCKET;
1791         } else {
1792             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1793         }
1794         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1795         cmsg->cmsg_len = CMSG_LEN(len);
1796 
1797         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1798             int *fd = (int *)data;
1799             int *target_fd = (int *)target_data;
1800             int i, numfds = len / sizeof(int);
1801 
1802             for (i = 0; i < numfds; i++) {
1803                 __get_user(fd[i], target_fd + i);
1804             }
1805         } else if (cmsg->cmsg_level == SOL_SOCKET
1806                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1807             struct ucred *cred = (struct ucred *)data;
1808             struct target_ucred *target_cred =
1809                 (struct target_ucred *)target_data;
1810 
1811             __get_user(cred->pid, &target_cred->pid);
1812             __get_user(cred->uid, &target_cred->uid);
1813             __get_user(cred->gid, &target_cred->gid);
1814         } else {
1815             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1816                           cmsg->cmsg_level, cmsg->cmsg_type);
1817             memcpy(data, target_data, len);
1818         }
1819 
1820         cmsg = CMSG_NXTHDR(msgh, cmsg);
1821         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1822                                          target_cmsg_start);
1823     }
1824     unlock_user(target_cmsg, target_cmsg_addr, 0);
1825  the_end:
1826     msgh->msg_controllen = space;
1827     return 0;
1828 }
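
/*
 * Worked example for target_to_host_cmsg() above (sizes assume a
 * typical 64-bit host and are illustrative): a guest control message
 * carrying three file descriptors has a payload of
 *
 *     len = tswapal(cmsg_len) - sizeof(struct target_cmsghdr)
 *         = 3 * sizeof(int) = 12 bytes
 *
 * which fits into CMSG_SPACE(12) of the host buffer; each target fd is
 * then copied with __get_user() so the host sendmsg() receives an
 * ordinary SCM_RIGHTS message with correctly byte-swapped descriptors.
 */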
1829 
1830 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1831                                            struct msghdr *msgh)
1832 {
1833     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1834     abi_long msg_controllen;
1835     abi_ulong target_cmsg_addr;
1836     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1837     socklen_t space = 0;
1838 
1839     msg_controllen = tswapal(target_msgh->msg_controllen);
1840     if (msg_controllen < sizeof (struct target_cmsghdr))
1841         goto the_end;
1842     target_cmsg_addr = tswapal(target_msgh->msg_control);
1843     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1844     target_cmsg_start = target_cmsg;
1845     if (!target_cmsg)
1846         return -TARGET_EFAULT;
1847 
1848     while (cmsg && target_cmsg) {
1849         void *data = CMSG_DATA(cmsg);
1850         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1851 
1852         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1853         int tgt_len, tgt_space;
1854 
1855         /* We never copy a half-header but may copy half-data;
1856          * this is Linux's behaviour in put_cmsg(). Note that
1857          * truncation here is a guest problem (which we report
1858          * to the guest via the CTRUNC bit), unlike truncation
1859          * in target_to_host_cmsg, which is a QEMU bug.
1860          */
1861         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             break;
1864         }
1865 
1866         if (cmsg->cmsg_level == SOL_SOCKET) {
1867             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1868         } else {
1869             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1870         }
1871         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1872 
1873         /* Payload types which need a different size of payload on
1874          * the target must adjust tgt_len here.
1875          */
1876         tgt_len = len;
1877         switch (cmsg->cmsg_level) {
1878         case SOL_SOCKET:
1879             switch (cmsg->cmsg_type) {
1880             case SO_TIMESTAMP:
1881                 tgt_len = sizeof(struct target_timeval);
1882                 break;
1883             default:
1884                 break;
1885             }
1886             break;
1887         default:
1888             break;
1889         }
1890 
1891         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1892             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1893             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1894         }
1895 
1896         /* We must now copy-and-convert len bytes of payload
1897          * into tgt_len bytes of destination space. Bear in mind
1898          * that in both source and destination we may be dealing
1899          * with a truncated value!
1900          */
1901         switch (cmsg->cmsg_level) {
1902         case SOL_SOCKET:
1903             switch (cmsg->cmsg_type) {
1904             case SCM_RIGHTS:
1905             {
1906                 int *fd = (int *)data;
1907                 int *target_fd = (int *)target_data;
1908                 int i, numfds = tgt_len / sizeof(int);
1909 
1910                 for (i = 0; i < numfds; i++) {
1911                     __put_user(fd[i], target_fd + i);
1912                 }
1913                 break;
1914             }
1915             case SO_TIMESTAMP:
1916             {
1917                 struct timeval *tv = (struct timeval *)data;
1918                 struct target_timeval *target_tv =
1919                     (struct target_timeval *)target_data;
1920 
1921                 if (len != sizeof(struct timeval) ||
1922                     tgt_len != sizeof(struct target_timeval)) {
1923                     goto unimplemented;
1924                 }
1925 
1926                 /* copy struct timeval to target */
1927                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1928                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1929                 break;
1930             }
1931             case SCM_CREDENTIALS:
1932             {
1933                 struct ucred *cred = (struct ucred *)data;
1934                 struct target_ucred *target_cred =
1935                     (struct target_ucred *)target_data;
1936 
1937                 __put_user(cred->pid, &target_cred->pid);
1938                 __put_user(cred->uid, &target_cred->uid);
1939                 __put_user(cred->gid, &target_cred->gid);
1940                 break;
1941             }
1942             default:
1943                 goto unimplemented;
1944             }
1945             break;
1946 
1947         case SOL_IP:
1948             switch (cmsg->cmsg_type) {
1949             case IP_TTL:
1950             {
1951                 uint32_t *v = (uint32_t *)data;
1952                 uint32_t *t_int = (uint32_t *)target_data;
1953 
1954                 if (len != sizeof(uint32_t) ||
1955                     tgt_len != sizeof(uint32_t)) {
1956                     goto unimplemented;
1957                 }
1958                 __put_user(*v, t_int);
1959                 break;
1960             }
1961             case IP_RECVERR:
1962             {
1963                 struct errhdr_t {
1964                    struct sock_extended_err ee;
1965                    struct sockaddr_in offender;
1966                 };
1967                 struct errhdr_t *errh = (struct errhdr_t *)data;
1968                 struct errhdr_t *target_errh =
1969                     (struct errhdr_t *)target_data;
1970 
1971                 if (len != sizeof(struct errhdr_t) ||
1972                     tgt_len != sizeof(struct errhdr_t)) {
1973                     goto unimplemented;
1974                 }
1975                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1976                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1977                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1978                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1979                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1980                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1981                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1982                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1983                     (void *) &errh->offender, sizeof(errh->offender));
1984                 break;
1985             }
1986             default:
1987                 goto unimplemented;
1988             }
1989             break;
1990 
1991         case SOL_IPV6:
1992             switch (cmsg->cmsg_type) {
1993             case IPV6_HOPLIMIT:
1994             {
1995                 uint32_t *v = (uint32_t *)data;
1996                 uint32_t *t_int = (uint32_t *)target_data;
1997 
1998                 if (len != sizeof(uint32_t) ||
1999                     tgt_len != sizeof(uint32_t)) {
2000                     goto unimplemented;
2001                 }
2002                 __put_user(*v, t_int);
2003                 break;
2004             }
2005             case IPV6_RECVERR:
2006             {
2007                 struct errhdr6_t {
2008                    struct sock_extended_err ee;
2009                    struct sockaddr_in6 offender;
2010                 };
2011                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2012                 struct errhdr6_t *target_errh =
2013                     (struct errhdr6_t *)target_data;
2014 
2015                 if (len != sizeof(struct errhdr6_t) ||
2016                     tgt_len != sizeof(struct errhdr6_t)) {
2017                     goto unimplemented;
2018                 }
2019                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2020                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2021                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2022                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2023                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2024                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2025                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2026                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2027                     (void *) &errh->offender, sizeof(errh->offender));
2028                 break;
2029             }
2030             default:
2031                 goto unimplemented;
2032             }
2033             break;
2034 
2035         default:
2036         unimplemented:
2037             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2038                           cmsg->cmsg_level, cmsg->cmsg_type);
2039             memcpy(target_data, data, MIN(len, tgt_len));
2040             if (tgt_len > len) {
2041                 memset(target_data + len, 0, tgt_len - len);
2042             }
2043         }
2044 
2045         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2046         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2047         if (msg_controllen < tgt_space) {
2048             tgt_space = msg_controllen;
2049         }
2050         msg_controllen -= tgt_space;
2051         space += tgt_space;
2052         cmsg = CMSG_NXTHDR(msgh, cmsg);
2053         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2054                                          target_cmsg_start);
2055     }
2056     unlock_user(target_cmsg, target_cmsg_addr, space);
2057  the_end:
2058     target_msgh->msg_controllen = tswapal(space);
2059     return 0;
2060 }
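
/*
 * Example of the len/tgt_len distinction in host_to_target_cmsg()
 * above (struct sizes are illustrative, assuming a 64-bit host and a
 * 32-bit guest): for an SO_TIMESTAMP message the host kernel supplies
 * a 16-byte struct timeval, while the guest expects an 8-byte struct
 * target_timeval, so
 *
 *     len     = 16;                             host payload size
 *     tgt_len = sizeof(struct target_timeval);  8 for such a guest
 *
 * and tv_sec/tv_usec are converted field by field with __put_user()
 * instead of a raw memcpy, which also takes care of byte order.
 */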
2061 
2062 /* do_setsockopt() must return target values and target errnos. */
2063 static abi_long do_setsockopt(int sockfd, int level, int optname,
2064                               abi_ulong optval_addr, socklen_t optlen)
2065 {
2066     abi_long ret;
2067     int val;
2068     struct ip_mreqn *ip_mreq;
2069     struct ip_mreq_source *ip_mreq_source;
2070 
2071     switch(level) {
2072     case SOL_TCP:
2073     case SOL_UDP:
2074         /* TCP and UDP options all take an 'int' value.  */
2075         if (optlen < sizeof(uint32_t))
2076             return -TARGET_EINVAL;
2077 
2078         if (get_user_u32(val, optval_addr))
2079             return -TARGET_EFAULT;
2080         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2081         break;
2082     case SOL_IP:
2083         switch(optname) {
2084         case IP_TOS:
2085         case IP_TTL:
2086         case IP_HDRINCL:
2087         case IP_ROUTER_ALERT:
2088         case IP_RECVOPTS:
2089         case IP_RETOPTS:
2090         case IP_PKTINFO:
2091         case IP_MTU_DISCOVER:
2092         case IP_RECVERR:
2093         case IP_RECVTTL:
2094         case IP_RECVTOS:
2095 #ifdef IP_FREEBIND
2096         case IP_FREEBIND:
2097 #endif
2098         case IP_MULTICAST_TTL:
2099         case IP_MULTICAST_LOOP:
2100             val = 0;
2101             if (optlen >= sizeof(uint32_t)) {
2102                 if (get_user_u32(val, optval_addr))
2103                     return -TARGET_EFAULT;
2104             } else if (optlen >= 1) {
2105                 if (get_user_u8(val, optval_addr))
2106                     return -TARGET_EFAULT;
2107             }
2108             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2109             break;
2110         case IP_ADD_MEMBERSHIP:
2111         case IP_DROP_MEMBERSHIP:
2112             if (optlen < sizeof (struct target_ip_mreq) ||
2113                 optlen > sizeof (struct target_ip_mreqn))
2114                 return -TARGET_EINVAL;
2115 
2116             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2117             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2118             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2119             break;
2120 
2121         case IP_BLOCK_SOURCE:
2122         case IP_UNBLOCK_SOURCE:
2123         case IP_ADD_SOURCE_MEMBERSHIP:
2124         case IP_DROP_SOURCE_MEMBERSHIP:
2125             if (optlen != sizeof (struct target_ip_mreq_source))
2126                 return -TARGET_EINVAL;
2127 
2128             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2129             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2130             unlock_user (ip_mreq_source, optval_addr, 0);
2131             break;
2132 
2133         default:
2134             goto unimplemented;
2135         }
2136         break;
2137     case SOL_IPV6:
2138         switch (optname) {
2139         case IPV6_MTU_DISCOVER:
2140         case IPV6_MTU:
2141         case IPV6_V6ONLY:
2142         case IPV6_RECVPKTINFO:
2143         case IPV6_UNICAST_HOPS:
2144         case IPV6_MULTICAST_HOPS:
2145         case IPV6_MULTICAST_LOOP:
2146         case IPV6_RECVERR:
2147         case IPV6_RECVHOPLIMIT:
2148         case IPV6_2292HOPLIMIT:
2149         case IPV6_CHECKSUM:
2150         case IPV6_ADDRFORM:
2151         case IPV6_2292PKTINFO:
2152         case IPV6_RECVTCLASS:
2153         case IPV6_RECVRTHDR:
2154         case IPV6_2292RTHDR:
2155         case IPV6_RECVHOPOPTS:
2156         case IPV6_2292HOPOPTS:
2157         case IPV6_RECVDSTOPTS:
2158         case IPV6_2292DSTOPTS:
2159         case IPV6_TCLASS:
2160         case IPV6_ADDR_PREFERENCES:
2161 #ifdef IPV6_RECVPATHMTU
2162         case IPV6_RECVPATHMTU:
2163 #endif
2164 #ifdef IPV6_TRANSPARENT
2165         case IPV6_TRANSPARENT:
2166 #endif
2167 #ifdef IPV6_FREEBIND
2168         case IPV6_FREEBIND:
2169 #endif
2170 #ifdef IPV6_RECVORIGDSTADDR
2171         case IPV6_RECVORIGDSTADDR:
2172 #endif
2173             val = 0;
2174             if (optlen < sizeof(uint32_t)) {
2175                 return -TARGET_EINVAL;
2176             }
2177             if (get_user_u32(val, optval_addr)) {
2178                 return -TARGET_EFAULT;
2179             }
2180             ret = get_errno(setsockopt(sockfd, level, optname,
2181                                        &val, sizeof(val)));
2182             break;
2183         case IPV6_PKTINFO:
2184         {
2185             struct in6_pktinfo pki;
2186 
2187             if (optlen < sizeof(pki)) {
2188                 return -TARGET_EINVAL;
2189             }
2190 
2191             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2192                 return -TARGET_EFAULT;
2193             }
2194 
2195             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2196 
2197             ret = get_errno(setsockopt(sockfd, level, optname,
2198                                        &pki, sizeof(pki)));
2199             break;
2200         }
2201         case IPV6_ADD_MEMBERSHIP:
2202         case IPV6_DROP_MEMBERSHIP:
2203         {
2204             struct ipv6_mreq ipv6mreq;
2205 
2206             if (optlen < sizeof(ipv6mreq)) {
2207                 return -TARGET_EINVAL;
2208             }
2209 
2210             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2211                 return -TARGET_EFAULT;
2212             }
2213 
2214             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2215 
2216             ret = get_errno(setsockopt(sockfd, level, optname,
2217                                        &ipv6mreq, sizeof(ipv6mreq)));
2218             break;
2219         }
2220         default:
2221             goto unimplemented;
2222         }
2223         break;
2224     case SOL_ICMPV6:
2225         switch (optname) {
2226         case ICMPV6_FILTER:
2227         {
2228             struct icmp6_filter icmp6f;
2229 
2230             if (optlen > sizeof(icmp6f)) {
2231                 optlen = sizeof(icmp6f);
2232             }
2233 
2234             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2235                 return -TARGET_EFAULT;
2236             }
2237 
2238             for (val = 0; val < 8; val++) {
2239                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2240             }
2241 
2242             ret = get_errno(setsockopt(sockfd, level, optname,
2243                                        &icmp6f, optlen));
2244             break;
2245         }
2246         default:
2247             goto unimplemented;
2248         }
2249         break;
2250     case SOL_RAW:
2251         switch (optname) {
2252         case ICMP_FILTER:
2253         case IPV6_CHECKSUM:
2254             /* those take an u32 value */
2255             /* these options take a u32 value */
2256                 return -TARGET_EINVAL;
2257             }
2258 
2259             if (get_user_u32(val, optval_addr)) {
2260                 return -TARGET_EFAULT;
2261             }
2262             ret = get_errno(setsockopt(sockfd, level, optname,
2263                                        &val, sizeof(val)));
2264             break;
2265 
2266         default:
2267             goto unimplemented;
2268         }
2269         break;
2270 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2271     case SOL_ALG:
2272         switch (optname) {
2273         case ALG_SET_KEY:
2274         {
2275             char *alg_key = g_malloc(optlen);
2276 
2277             if (!alg_key) {
2278                 return -TARGET_ENOMEM;
2279             }
2280             if (copy_from_user(alg_key, optval_addr, optlen)) {
2281                 g_free(alg_key);
2282                 return -TARGET_EFAULT;
2283             }
2284             ret = get_errno(setsockopt(sockfd, level, optname,
2285                                        alg_key, optlen));
2286             g_free(alg_key);
2287             break;
2288         }
2289         case ALG_SET_AEAD_AUTHSIZE:
2290         {
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        NULL, optlen));
2293             break;
2294         }
2295         default:
2296             goto unimplemented;
2297         }
2298         break;
2299 #endif
2300     case TARGET_SOL_SOCKET:
2301         switch (optname) {
2302         case TARGET_SO_RCVTIMEO:
2303         {
2304                 struct timeval tv;
2305 
2306                 optname = SO_RCVTIMEO;
2307 
2308 set_timeout:
2309                 if (optlen != sizeof(struct target_timeval)) {
2310                     return -TARGET_EINVAL;
2311                 }
2312 
2313                 if (copy_from_user_timeval(&tv, optval_addr)) {
2314                     return -TARGET_EFAULT;
2315                 }
2316 
2317                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2318                                 &tv, sizeof(tv)));
2319                 return ret;
2320         }
2321         case TARGET_SO_SNDTIMEO:
2322                 optname = SO_SNDTIMEO;
2323                 goto set_timeout;
2324         case TARGET_SO_ATTACH_FILTER:
2325         {
2326                 struct target_sock_fprog *tfprog;
2327                 struct target_sock_filter *tfilter;
2328                 struct sock_fprog fprog;
2329                 struct sock_filter *filter;
2330                 int i;
2331 
2332                 if (optlen != sizeof(*tfprog)) {
2333                     return -TARGET_EINVAL;
2334                 }
2335                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2336                     return -TARGET_EFAULT;
2337                 }
2338                 if (!lock_user_struct(VERIFY_READ, tfilter,
2339                                       tswapal(tfprog->filter), 0)) {
2340                     unlock_user_struct(tfprog, optval_addr, 1);
2341                     return -TARGET_EFAULT;
2342                 }
2343 
2344                 fprog.len = tswap16(tfprog->len);
2345                 filter = g_try_new(struct sock_filter, fprog.len);
2346                 if (filter == NULL) {
2347                     unlock_user_struct(tfilter, tfprog->filter, 1);
2348                     unlock_user_struct(tfprog, optval_addr, 1);
2349                     return -TARGET_ENOMEM;
2350                 }
2351                 for (i = 0; i < fprog.len; i++) {
2352                     filter[i].code = tswap16(tfilter[i].code);
2353                     filter[i].jt = tfilter[i].jt;
2354                     filter[i].jf = tfilter[i].jf;
2355                     filter[i].k = tswap32(tfilter[i].k);
2356                 }
2357                 fprog.filter = filter;
2358 
2359                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2360                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2361                 g_free(filter);
2362 
2363                 unlock_user_struct(tfilter, tfprog->filter, 1);
2364                 unlock_user_struct(tfprog, optval_addr, 1);
2365                 return ret;
2366         }
2367         case TARGET_SO_BINDTODEVICE:
2368         {
2369                 char *dev_ifname, *addr_ifname;
2370 
2371                 if (optlen > IFNAMSIZ - 1) {
2372                     optlen = IFNAMSIZ - 1;
2373                 }
2374                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2375                 if (!dev_ifname) {
2376                     return -TARGET_EFAULT;
2377                 }
2378                 optname = SO_BINDTODEVICE;
2379                 addr_ifname = alloca(IFNAMSIZ);
2380                 memcpy(addr_ifname, dev_ifname, optlen);
2381                 addr_ifname[optlen] = 0;
2382                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2383                                            addr_ifname, optlen));
2384                 unlock_user(dev_ifname, optval_addr, 0);
2385                 return ret;
2386         }
2387         case TARGET_SO_LINGER:
2388         {
2389                 struct linger lg;
2390                 struct target_linger *tlg;
2391 
2392                 if (optlen != sizeof(struct target_linger)) {
2393                     return -TARGET_EINVAL;
2394                 }
2395                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2396                     return -TARGET_EFAULT;
2397                 }
2398                 __get_user(lg.l_onoff, &tlg->l_onoff);
2399                 __get_user(lg.l_linger, &tlg->l_linger);
2400                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2401                                 &lg, sizeof(lg)));
2402                 unlock_user_struct(tlg, optval_addr, 0);
2403                 return ret;
2404         }
2405         /* Options with 'int' argument.  */
2406         case TARGET_SO_DEBUG:
2407                 optname = SO_DEBUG;
2408                 break;
2409         case TARGET_SO_REUSEADDR:
2410                 optname = SO_REUSEADDR;
2411                 break;
2412 #ifdef SO_REUSEPORT
2413         case TARGET_SO_REUSEPORT:
2414                 optname = SO_REUSEPORT;
2415                 break;
2416 #endif
2417         case TARGET_SO_TYPE:
2418                 optname = SO_TYPE;
2419                 break;
2420         case TARGET_SO_ERROR:
2421                 optname = SO_ERROR;
2422                 break;
2423         case TARGET_SO_DONTROUTE:
2424                 optname = SO_DONTROUTE;
2425                 break;
2426         case TARGET_SO_BROADCAST:
2427                 optname = SO_BROADCAST;
2428                 break;
2429         case TARGET_SO_SNDBUF:
2430                 optname = SO_SNDBUF;
2431                 break;
2432         case TARGET_SO_SNDBUFFORCE:
2433                 optname = SO_SNDBUFFORCE;
2434                 break;
2435         case TARGET_SO_RCVBUF:
2436                 optname = SO_RCVBUF;
2437                 break;
2438         case TARGET_SO_RCVBUFFORCE:
2439                 optname = SO_RCVBUFFORCE;
2440                 break;
2441         case TARGET_SO_KEEPALIVE:
2442                 optname = SO_KEEPALIVE;
2443                 break;
2444         case TARGET_SO_OOBINLINE:
2445                 optname = SO_OOBINLINE;
2446                 break;
2447         case TARGET_SO_NO_CHECK:
2448                 optname = SO_NO_CHECK;
2449                 break;
2450         case TARGET_SO_PRIORITY:
2451                 optname = SO_PRIORITY;
2452                 break;
2453 #ifdef SO_BSDCOMPAT
2454         case TARGET_SO_BSDCOMPAT:
2455                 optname = SO_BSDCOMPAT;
2456                 break;
2457 #endif
2458         case TARGET_SO_PASSCRED:
2459                 optname = SO_PASSCRED;
2460                 break;
2461         case TARGET_SO_PASSSEC:
2462                 optname = SO_PASSSEC;
2463                 break;
2464         case TARGET_SO_TIMESTAMP:
2465                 optname = SO_TIMESTAMP;
2466                 break;
2467         case TARGET_SO_RCVLOWAT:
2468                 optname = SO_RCVLOWAT;
2469                 break;
2470         default:
2471             goto unimplemented;
2472         }
2473         if (optlen < sizeof(uint32_t))
2474             return -TARGET_EINVAL;
2475 
2476         if (get_user_u32(val, optval_addr))
2477             return -TARGET_EFAULT;
2478         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2479         break;
2480 #ifdef SOL_NETLINK
2481     case SOL_NETLINK:
2482         switch (optname) {
2483         case NETLINK_PKTINFO:
2484         case NETLINK_ADD_MEMBERSHIP:
2485         case NETLINK_DROP_MEMBERSHIP:
2486         case NETLINK_BROADCAST_ERROR:
2487         case NETLINK_NO_ENOBUFS:
2488 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2489         case NETLINK_LISTEN_ALL_NSID:
2490         case NETLINK_CAP_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2493         case NETLINK_EXT_ACK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2496         case NETLINK_GET_STRICT_CHK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2498             break;
2499         default:
2500             goto unimplemented;
2501         }
2502         val = 0;
2503         if (optlen < sizeof(uint32_t)) {
2504             return -TARGET_EINVAL;
2505         }
2506         if (get_user_u32(val, optval_addr)) {
2507             return -TARGET_EFAULT;
2508         }
2509         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2510                                    sizeof(val)));
2511         break;
2512 #endif /* SOL_NETLINK */
2513     default:
2514     unimplemented:
2515         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2516                       level, optname);
2517         ret = -TARGET_ENOPROTOOPT;
2518     }
2519     return ret;
2520 }
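
/*
 * Sketch of one conversion path through do_setsockopt() above (the
 * guest call is hypothetical): a guest issuing
 *
 *     setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 *
 * lands in the TARGET_SO_RCVTIMEO case, where copy_from_user_timeval()
 * turns the guest's struct target_timeval (guest byte order, abi_long
 * fields) into a host struct timeval before the host setsockopt() is
 * made, so neither layout nor endianness differences reach the host
 * kernel.
 */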
2521 
2522 /* do_getsockopt() must return target values and target errnos. */
2523 static abi_long do_getsockopt(int sockfd, int level, int optname,
2524                               abi_ulong optval_addr, abi_ulong optlen)
2525 {
2526     abi_long ret;
2527     int len, val;
2528     socklen_t lv;
2529 
2530     switch(level) {
2531     case TARGET_SOL_SOCKET:
2532         level = SOL_SOCKET;
2533         switch (optname) {
2534         /* These don't just return a single integer */
2535         case TARGET_SO_PEERNAME:
2536             goto unimplemented;
2537         case TARGET_SO_RCVTIMEO: {
2538             struct timeval tv;
2539             socklen_t tvlen;
2540 
2541             optname = SO_RCVTIMEO;
2542 
2543 get_timeout:
2544             if (get_user_u32(len, optlen)) {
2545                 return -TARGET_EFAULT;
2546             }
2547             if (len < 0) {
2548                 return -TARGET_EINVAL;
2549             }
2550 
2551             tvlen = sizeof(tv);
2552             ret = get_errno(getsockopt(sockfd, level, optname,
2553                                        &tv, &tvlen));
2554             if (ret < 0) {
2555                 return ret;
2556             }
2557             if (len > sizeof(struct target_timeval)) {
2558                 len = sizeof(struct target_timeval);
2559             }
2560             if (copy_to_user_timeval(optval_addr, &tv)) {
2561                 return -TARGET_EFAULT;
2562             }
2563             if (put_user_u32(len, optlen)) {
2564                 return -TARGET_EFAULT;
2565             }
2566             break;
2567         }
2568         case TARGET_SO_SNDTIMEO:
2569             optname = SO_SNDTIMEO;
2570             goto get_timeout;
2571         case TARGET_SO_PEERCRED: {
2572             struct ucred cr;
2573             socklen_t crlen;
2574             struct target_ucred *tcr;
2575 
2576             if (get_user_u32(len, optlen)) {
2577                 return -TARGET_EFAULT;
2578             }
2579             if (len < 0) {
2580                 return -TARGET_EINVAL;
2581             }
2582 
2583             crlen = sizeof(cr);
2584             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2585                                        &cr, &crlen));
2586             if (ret < 0) {
2587                 return ret;
2588             }
2589             if (len > crlen) {
2590                 len = crlen;
2591             }
2592             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             __put_user(cr.pid, &tcr->pid);
2596             __put_user(cr.uid, &tcr->uid);
2597             __put_user(cr.gid, &tcr->gid);
2598             unlock_user_struct(tcr, optval_addr, 1);
2599             if (put_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             break;
2603         }
2604         case TARGET_SO_PEERSEC: {
2605             char *name;
2606 
2607             if (get_user_u32(len, optlen)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             if (len < 0) {
2611                 return -TARGET_EINVAL;
2612             }
2613             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2614             if (!name) {
2615                 return -TARGET_EFAULT;
2616             }
2617             lv = len;
2618             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2619                                        name, &lv));
2620             if (put_user_u32(lv, optlen)) {
2621                 ret = -TARGET_EFAULT;
2622             }
2623             unlock_user(name, optval_addr, lv);
2624             break;
2625         }
2626         case TARGET_SO_LINGER:
2627         {
2628             struct linger lg;
2629             socklen_t lglen;
2630             struct target_linger *tlg;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             lglen = sizeof(lg);
2640             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2641                                        &lg, &lglen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > lglen) {
2646                 len = lglen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(lg.l_onoff, &tlg->l_onoff);
2652             __put_user(lg.l_linger, &tlg->l_linger);
2653             unlock_user_struct(tlg, optval_addr, 1);
2654             if (put_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             break;
2658         }
2659         /* Options with 'int' argument.  */
2660         case TARGET_SO_DEBUG:
2661             optname = SO_DEBUG;
2662             goto int_case;
2663         case TARGET_SO_REUSEADDR:
2664             optname = SO_REUSEADDR;
2665             goto int_case;
2666 #ifdef SO_REUSEPORT
2667         case TARGET_SO_REUSEPORT:
2668             optname = SO_REUSEPORT;
2669             goto int_case;
2670 #endif
2671         case TARGET_SO_TYPE:
2672             optname = SO_TYPE;
2673             goto int_case;
2674         case TARGET_SO_ERROR:
2675             optname = SO_ERROR;
2676             goto int_case;
2677         case TARGET_SO_DONTROUTE:
2678             optname = SO_DONTROUTE;
2679             goto int_case;
2680         case TARGET_SO_BROADCAST:
2681             optname = SO_BROADCAST;
2682             goto int_case;
2683         case TARGET_SO_SNDBUF:
2684             optname = SO_SNDBUF;
2685             goto int_case;
2686         case TARGET_SO_RCVBUF:
2687             optname = SO_RCVBUF;
2688             goto int_case;
2689         case TARGET_SO_KEEPALIVE:
2690             optname = SO_KEEPALIVE;
2691             goto int_case;
2692         case TARGET_SO_OOBINLINE:
2693             optname = SO_OOBINLINE;
2694             goto int_case;
2695         case TARGET_SO_NO_CHECK:
2696             optname = SO_NO_CHECK;
2697             goto int_case;
2698         case TARGET_SO_PRIORITY:
2699             optname = SO_PRIORITY;
2700             goto int_case;
2701 #ifdef SO_BSDCOMPAT
2702         case TARGET_SO_BSDCOMPAT:
2703             optname = SO_BSDCOMPAT;
2704             goto int_case;
2705 #endif
2706         case TARGET_SO_PASSCRED:
2707             optname = SO_PASSCRED;
2708             goto int_case;
2709         case TARGET_SO_TIMESTAMP:
2710             optname = SO_TIMESTAMP;
2711             goto int_case;
2712         case TARGET_SO_RCVLOWAT:
2713             optname = SO_RCVLOWAT;
2714             goto int_case;
2715         case TARGET_SO_ACCEPTCONN:
2716             optname = SO_ACCEPTCONN;
2717             goto int_case;
2718         case TARGET_SO_PROTOCOL:
2719             optname = SO_PROTOCOL;
2720             goto int_case;
2721         case TARGET_SO_DOMAIN:
2722             optname = SO_DOMAIN;
2723             goto int_case;
2724         default:
2725             goto int_case;
2726         }
2727         break;
2728     case SOL_TCP:
2729     case SOL_UDP:
2730         /* TCP and UDP options all take an 'int' value.  */
2731     int_case:
2732         if (get_user_u32(len, optlen))
2733             return -TARGET_EFAULT;
2734         if (len < 0)
2735             return -TARGET_EINVAL;
2736         lv = sizeof(lv);
2737         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2738         if (ret < 0)
2739             return ret;
2740         if (optname == SO_TYPE) {
2741             val = host_to_target_sock_type(val);
2742         }
2743         if (len > lv)
2744             len = lv;
2745         if (len == 4) {
2746             if (put_user_u32(val, optval_addr))
2747                 return -TARGET_EFAULT;
2748         } else {
2749             if (put_user_u8(val, optval_addr))
2750                 return -TARGET_EFAULT;
2751         }
2752         if (put_user_u32(len, optlen))
2753             return -TARGET_EFAULT;
2754         break;
2755     case SOL_IP:
2756         switch(optname) {
2757         case IP_TOS:
2758         case IP_TTL:
2759         case IP_HDRINCL:
2760         case IP_ROUTER_ALERT:
2761         case IP_RECVOPTS:
2762         case IP_RETOPTS:
2763         case IP_PKTINFO:
2764         case IP_MTU_DISCOVER:
2765         case IP_RECVERR:
2766         case IP_RECVTOS:
2767 #ifdef IP_FREEBIND
2768         case IP_FREEBIND:
2769 #endif
2770         case IP_MULTICAST_TTL:
2771         case IP_MULTICAST_LOOP:
2772             if (get_user_u32(len, optlen))
2773                 return -TARGET_EFAULT;
2774             if (len < 0)
2775                 return -TARGET_EINVAL;
2776             lv = sizeof(lv);
2777             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2778             if (ret < 0)
2779                 return ret;
2780             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2781                 len = 1;
2782                 if (put_user_u32(len, optlen)
2783                     || put_user_u8(val, optval_addr))
2784                     return -TARGET_EFAULT;
2785             } else {
2786                 if (len > sizeof(int))
2787                     len = sizeof(int);
2788                 if (put_user_u32(len, optlen)
2789                     || put_user_u32(val, optval_addr))
2790                     return -TARGET_EFAULT;
2791             }
2792             break;
2793         default:
2794             ret = -TARGET_ENOPROTOOPT;
2795             break;
2796         }
2797         break;
2798     case SOL_IPV6:
2799         switch (optname) {
2800         case IPV6_MTU_DISCOVER:
2801         case IPV6_MTU:
2802         case IPV6_V6ONLY:
2803         case IPV6_RECVPKTINFO:
2804         case IPV6_UNICAST_HOPS:
2805         case IPV6_MULTICAST_HOPS:
2806         case IPV6_MULTICAST_LOOP:
2807         case IPV6_RECVERR:
2808         case IPV6_RECVHOPLIMIT:
2809         case IPV6_2292HOPLIMIT:
2810         case IPV6_CHECKSUM:
2811         case IPV6_ADDRFORM:
2812         case IPV6_2292PKTINFO:
2813         case IPV6_RECVTCLASS:
2814         case IPV6_RECVRTHDR:
2815         case IPV6_2292RTHDR:
2816         case IPV6_RECVHOPOPTS:
2817         case IPV6_2292HOPOPTS:
2818         case IPV6_RECVDSTOPTS:
2819         case IPV6_2292DSTOPTS:
2820         case IPV6_TCLASS:
2821         case IPV6_ADDR_PREFERENCES:
2822 #ifdef IPV6_RECVPATHMTU
2823         case IPV6_RECVPATHMTU:
2824 #endif
2825 #ifdef IPV6_TRANSPARENT
2826         case IPV6_TRANSPARENT:
2827 #endif
2828 #ifdef IPV6_FREEBIND
2829         case IPV6_FREEBIND:
2830 #endif
2831 #ifdef IPV6_RECVORIGDSTADDR
2832         case IPV6_RECVORIGDSTADDR:
2833 #endif
2834             if (get_user_u32(len, optlen))
2835                 return -TARGET_EFAULT;
2836             if (len < 0)
2837                 return -TARGET_EINVAL;
2838             lv = sizeof(lv);
2839             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2840             if (ret < 0)
2841                 return ret;
2842             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2843                 len = 1;
2844                 if (put_user_u32(len, optlen)
2845                     || put_user_u8(val, optval_addr))
2846                     return -TARGET_EFAULT;
2847             } else {
2848                 if (len > sizeof(int))
2849                     len = sizeof(int);
2850                 if (put_user_u32(len, optlen)
2851                     || put_user_u32(val, optval_addr))
2852                     return -TARGET_EFAULT;
2853             }
2854             break;
2855         default:
2856             ret = -TARGET_ENOPROTOOPT;
2857             break;
2858         }
2859         break;
2860 #ifdef SOL_NETLINK
2861     case SOL_NETLINK:
2862         switch (optname) {
2863         case NETLINK_PKTINFO:
2864         case NETLINK_BROADCAST_ERROR:
2865         case NETLINK_NO_ENOBUFS:
2866 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2867         case NETLINK_LISTEN_ALL_NSID:
2868         case NETLINK_CAP_ACK:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2871         case NETLINK_EXT_ACK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2874         case NETLINK_GET_STRICT_CHK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2876             if (get_user_u32(len, optlen)) {
2877                 return -TARGET_EFAULT;
2878             }
2879             if (len != sizeof(val)) {
2880                 return -TARGET_EINVAL;
2881             }
2882             lv = len;
2883             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2884             if (ret < 0) {
2885                 return ret;
2886             }
2887             if (put_user_u32(lv, optlen)
2888                 || put_user_u32(val, optval_addr)) {
2889                 return -TARGET_EFAULT;
2890             }
2891             break;
2892 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2893         case NETLINK_LIST_MEMBERSHIPS:
2894         {
2895             uint32_t *results;
2896             int i;
2897             if (get_user_u32(len, optlen)) {
2898                 return -TARGET_EFAULT;
2899             }
2900             if (len < 0) {
2901                 return -TARGET_EINVAL;
2902             }
2903             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2904             if (!results && len > 0) {
2905                 return -TARGET_EFAULT;
2906             }
2907             lv = len;
2908             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2909             if (ret < 0) {
2910                 unlock_user(results, optval_addr, 0);
2911                 return ret;
2912             }
2913             /* swap host endianness to target endianness. */
2914             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2915                 results[i] = tswap32(results[i]);
2916             }
2917             if (put_user_u32(lv, optlen)) {
2918                 return -TARGET_EFAULT;
2919             }
2920             unlock_user(results, optval_addr, 0);
2921             break;
2922         }
2923 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2924         default:
2925             goto unimplemented;
2926         }
2927         break;
2928 #endif /* SOL_NETLINK */
2929     default:
2930     unimplemented:
2931         qemu_log_mask(LOG_UNIMP,
2932                       "getsockopt level=%d optname=%d not yet supported\n",
2933                       level, optname);
2934         ret = -TARGET_EOPNOTSUPP;
2935         break;
2936     }
2937     return ret;
2938 }
2939 
2940 /* Convert target low/high pair representing file offset into the host
2941  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2942  * as the kernel doesn't handle them either.
2943  */
2944 static void target_to_host_low_high(abi_ulong tlow,
2945                                     abi_ulong thigh,
2946                                     unsigned long *hlow,
2947                                     unsigned long *hhigh)
2948 {
2949     uint64_t off = tlow |
2950         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2951         TARGET_LONG_BITS / 2;
2952 
2953     *hlow = off;
2954     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2955 }
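
/*
 * Worked example for target_to_host_low_high() above (a 32-bit guest
 * on a 64-bit host, i.e. TARGET_LONG_BITS == 32, HOST_LONG_BITS == 64;
 * the values are made up): with tlow = 0x89abcdef and
 * thigh = 0x01234567,
 *
 *     off    = 0x89abcdef | (0x01234567ULL << 32) = 0x0123456789abcdef
 *     *hlow  = 0x0123456789abcdef    the host long holds the whole offset
 *     *hhigh = 0
 *
 * On a 32-bit host the same offset splits back into hlow = 0x89abcdef
 * and hhigh = 0x01234567.  The doubled shifts avoid undefined behaviour
 * when a shift by the full word width would otherwise be needed.
 */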
2956 
2957 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2958                                 abi_ulong count, int copy)
2959 {
2960     struct target_iovec *target_vec;
2961     struct iovec *vec;
2962     abi_ulong total_len, max_len;
2963     int i;
2964     int err = 0;
2965     bool bad_address = false;
2966 
2967     if (count == 0) {
2968         errno = 0;
2969         return NULL;
2970     }
2971     if (count > IOV_MAX) {
2972         errno = EINVAL;
2973         return NULL;
2974     }
2975 
2976     vec = g_try_new0(struct iovec, count);
2977     if (vec == NULL) {
2978         errno = ENOMEM;
2979         return NULL;
2980     }
2981 
2982     target_vec = lock_user(VERIFY_READ, target_addr,
2983                            count * sizeof(struct target_iovec), 1);
2984     if (target_vec == NULL) {
2985         err = EFAULT;
2986         goto fail2;
2987     }
2988 
2989     /* ??? If host page size > target page size, this will result in a
2990        value larger than what we can actually support.  */
2991     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2992     total_len = 0;
2993 
2994     for (i = 0; i < count; i++) {
2995         abi_ulong base = tswapal(target_vec[i].iov_base);
2996         abi_long len = tswapal(target_vec[i].iov_len);
2997 
2998         if (len < 0) {
2999             err = EINVAL;
3000             goto fail;
3001         } else if (len == 0) {
3002             /* Zero length pointer is ignored.  */
3003             vec[i].iov_base = 0;
3004         } else {
3005             vec[i].iov_base = lock_user(type, base, len, copy);
3006             /* If the first buffer pointer is bad, this is a fault.  But
3007              * subsequent bad buffers will result in a partial write; this
3008              * is realized by filling the vector with null pointers and
3009              * zero lengths. */
3010             if (!vec[i].iov_base) {
3011                 if (i == 0) {
3012                     err = EFAULT;
3013                     goto fail;
3014                 } else {
3015                     bad_address = true;
3016                 }
3017             }
3018             if (bad_address) {
3019                 len = 0;
3020             }
3021             if (len > max_len - total_len) {
3022                 len = max_len - total_len;
3023             }
3024         }
3025         vec[i].iov_len = len;
3026         total_len += len;
3027     }
3028 
3029     unlock_user(target_vec, target_addr, 0);
3030     return vec;
3031 
3032  fail:
3033     while (--i >= 0) {
3034         if (tswapal(target_vec[i].iov_len) > 0) {
3035             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3036         }
3037     }
3038     unlock_user(target_vec, target_addr, 0);
3039  fail2:
3040     g_free(vec);
3041     errno = err;
3042     return NULL;
3043 }
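
/*
 * Example of the partial-write behaviour of lock_iovec() above (the
 * addresses are made up): for a guest writev() with three iovecs whose
 * second iov_base is unmapped, the host vector ends up as
 *
 *     vec[0] = { <locked ptr>, len0 }    copied normally
 *     vec[1] = { NULL,         0    }    bad address, skipped
 *     vec[2] = { <locked ptr>, 0    }    forced to zero length as well
 *
 * so the host syscall performs a short write of the first buffer only,
 * mirroring Linux behaviour; only a bad first buffer fails the whole
 * call with EFAULT.
 */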
3044 
3045 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3046                          abi_ulong count, int copy)
3047 {
3048     struct target_iovec *target_vec;
3049     int i;
3050 
3051     target_vec = lock_user(VERIFY_READ, target_addr,
3052                            count * sizeof(struct target_iovec), 1);
3053     if (target_vec) {
3054         for (i = 0; i < count; i++) {
3055             abi_ulong base = tswapal(target_vec[i].iov_base);
3056             abi_long len = tswapal(target_vec[i].iov_len);
3057             if (len < 0) {
3058                 break;
3059             }
3060             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3061         }
3062         unlock_user(target_vec, target_addr, 0);
3063     }
3064 
3065     g_free(vec);
3066 }
3067 
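/*
 * Usage sketch for the two helpers above (a minimal illustration, assuming
 * the usual safe_* syscall wrappers defined elsewhere in this file): a
 * readv-style caller pairs lock_iovec() with unlock_iovec() around the host
 * syscall, roughly as follows:
 *
 *     struct iovec *vec = lock_iovec(VERIFY_WRITE, target_vec, count, 0);
 *     if (vec == NULL) {
 *         return -host_to_target_errno(errno);
 *     }
 *     ret = get_errno(safe_readv(fd, vec, count));
 *     unlock_iovec(vec, target_vec, count, 1);
 */
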
3068 static inline int target_to_host_sock_type(int *type)
3069 {
3070     int host_type = 0;
3071     int target_type = *type;
3072 
3073     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3074     case TARGET_SOCK_DGRAM:
3075         host_type = SOCK_DGRAM;
3076         break;
3077     case TARGET_SOCK_STREAM:
3078         host_type = SOCK_STREAM;
3079         break;
3080     default:
3081         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3082         break;
3083     }
3084     if (target_type & TARGET_SOCK_CLOEXEC) {
3085 #if defined(SOCK_CLOEXEC)
3086         host_type |= SOCK_CLOEXEC;
3087 #else
3088         return -TARGET_EINVAL;
3089 #endif
3090     }
3091     if (target_type & TARGET_SOCK_NONBLOCK) {
3092 #if defined(SOCK_NONBLOCK)
3093         host_type |= SOCK_NONBLOCK;
3094 #elif !defined(O_NONBLOCK)
3095         return -TARGET_EINVAL;
3096 #endif
3097     }
3098     *type = host_type;
3099     return 0;
3100 }
3101 
3102 /* Try to emulate socket type flags after socket creation.  */
3103 static int sock_flags_fixup(int fd, int target_type)
3104 {
3105 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3106     if (target_type & TARGET_SOCK_NONBLOCK) {
3107         int flags = fcntl(fd, F_GETFL);
3108         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3109             close(fd);
3110             return -TARGET_EINVAL;
3111         }
3112     }
3113 #endif
3114     return fd;
3115 }
3116 
3117 /* do_socket() Must return target values and target errnos. */
3118 static abi_long do_socket(int domain, int type, int protocol)
3119 {
3120     int target_type = type;
3121     int ret;
3122 
3123     ret = target_to_host_sock_type(&type);
3124     if (ret) {
3125         return ret;
3126     }
3127 
3128     if (domain == PF_NETLINK && !(
3129 #ifdef CONFIG_RTNETLINK
3130          protocol == NETLINK_ROUTE ||
3131 #endif
3132          protocol == NETLINK_KOBJECT_UEVENT ||
3133          protocol == NETLINK_AUDIT)) {
3134         return -TARGET_EPROTONOSUPPORT;
3135     }
3136 
3137     if (domain == AF_PACKET ||
3138         (domain == AF_INET && type == SOCK_PACKET)) {
3139         protocol = tswap16(protocol);
3140     }
3141 
3142     ret = get_errno(socket(domain, type, protocol));
3143     if (ret >= 0) {
3144         ret = sock_flags_fixup(ret, target_type);
3145         if (type == SOCK_PACKET) {
3146             /* Handle an obsolete case:
3147              * if the socket type is SOCK_PACKET, bind by name.
3148              */
3149             fd_trans_register(ret, &target_packet_trans);
3150         } else if (domain == PF_NETLINK) {
3151             switch (protocol) {
3152 #ifdef CONFIG_RTNETLINK
3153             case NETLINK_ROUTE:
3154                 fd_trans_register(ret, &target_netlink_route_trans);
3155                 break;
3156 #endif
3157             case NETLINK_KOBJECT_UEVENT:
3158                 /* nothing to do: messages are strings */
3159                 break;
3160             case NETLINK_AUDIT:
3161                 fd_trans_register(ret, &target_netlink_audit_trans);
3162                 break;
3163             default:
3164                 g_assert_not_reached();
3165             }
3166         }
3167     }
3168     return ret;
3169 }
3170 
3171 /* do_bind() Must return target values and target errnos. */
3172 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3173                         socklen_t addrlen)
3174 {
3175     void *addr;
3176     abi_long ret;
3177 
3178     if ((int)addrlen < 0) {
3179         return -TARGET_EINVAL;
3180     }
3181 
3182     addr = alloca(addrlen+1);
3183 
3184     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3185     if (ret)
3186         return ret;
3187 
3188     return get_errno(bind(sockfd, addr, addrlen));
3189 }
3190 
3191 /* do_connect() Must return target values and target errnos. */
3192 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3193                            socklen_t addrlen)
3194 {
3195     void *addr;
3196     abi_long ret;
3197 
3198     if ((int)addrlen < 0) {
3199         return -TARGET_EINVAL;
3200     }
3201 
3202     addr = alloca(addrlen+1);
3203 
3204     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3205     if (ret)
3206         return ret;
3207 
3208     return get_errno(safe_connect(sockfd, addr, addrlen));
3209 }
3210 
3211 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3212 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3213                                       int flags, int send)
3214 {
3215     abi_long ret, len;
3216     struct msghdr msg;
3217     abi_ulong count;
3218     struct iovec *vec;
3219     abi_ulong target_vec;
3220 
3221     if (msgp->msg_name) {
3222         msg.msg_namelen = tswap32(msgp->msg_namelen);
3223         msg.msg_name = alloca(msg.msg_namelen+1);
3224         ret = target_to_host_sockaddr(fd, msg.msg_name,
3225                                       tswapal(msgp->msg_name),
3226                                       msg.msg_namelen);
3227         if (ret == -TARGET_EFAULT) {
3228             /* For connected sockets msg_name and msg_namelen must
3229              * be ignored, so returning EFAULT immediately is wrong.
3230              * Instead, pass a bad msg_name to the host kernel, and
3231              * let it decide whether to return EFAULT or not.
3232              */
3233             msg.msg_name = (void *)-1;
3234         } else if (ret) {
3235             goto out2;
3236         }
3237     } else {
3238         msg.msg_name = NULL;
3239         msg.msg_namelen = 0;
3240     }
3241     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3242     msg.msg_control = alloca(msg.msg_controllen);
3243     memset(msg.msg_control, 0, msg.msg_controllen);
3244 
3245     msg.msg_flags = tswap32(msgp->msg_flags);
3246 
3247     count = tswapal(msgp->msg_iovlen);
3248     target_vec = tswapal(msgp->msg_iov);
3249 
3250     if (count > IOV_MAX) {
3251         /* sendmsg/recvmsg return a different errno for this condition than
3252          * readv/writev, so we must catch it here before lock_iovec() does.
3253          */
3254         ret = -TARGET_EMSGSIZE;
3255         goto out2;
3256     }
3257 
3258     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3259                      target_vec, count, send);
3260     if (vec == NULL) {
3261         ret = -host_to_target_errno(errno);
3262         goto out2;
3263     }
3264     msg.msg_iovlen = count;
3265     msg.msg_iov = vec;
3266 
3267     if (send) {
3268         if (fd_trans_target_to_host_data(fd)) {
3269             void *host_msg;
3270 
3271             host_msg = g_malloc(msg.msg_iov->iov_len);
3272             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3273             ret = fd_trans_target_to_host_data(fd)(host_msg,
3274                                                    msg.msg_iov->iov_len);
3275             if (ret >= 0) {
3276                 msg.msg_iov->iov_base = host_msg;
3277                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3278             }
3279             g_free(host_msg);
3280         } else {
3281             ret = target_to_host_cmsg(&msg, msgp);
3282             if (ret == 0) {
3283                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3284             }
3285         }
3286     } else {
3287         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3288         if (!is_error(ret)) {
3289             len = ret;
3290             if (fd_trans_host_to_target_data(fd)) {
3291                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3292                                                MIN(msg.msg_iov->iov_len, len));
3293             } else {
3294                 ret = host_to_target_cmsg(msgp, &msg);
3295             }
3296             if (!is_error(ret)) {
3297                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3298                 msgp->msg_flags = tswap32(msg.msg_flags);
3299                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3300                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3301                                     msg.msg_name, msg.msg_namelen);
3302                     if (ret) {
3303                         goto out;
3304                     }
3305                 }
3306 
3307                 ret = len;
3308             }
3309         }
3310     }
3311 
3312 out:
3313     unlock_iovec(vec, target_vec, count, !send);
3314 out2:
3315     return ret;
3316 }
3317 
3318 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3319                                int flags, int send)
3320 {
3321     abi_long ret;
3322     struct target_msghdr *msgp;
3323 
3324     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3325                           msgp,
3326                           target_msg,
3327                           send ? 1 : 0)) {
3328         return -TARGET_EFAULT;
3329     }
3330     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3331     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3332     return ret;
3333 }
3334 
3335 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3336  * so it might not have this *mmsg-specific flag either.
3337  */
3338 #ifndef MSG_WAITFORONE
3339 #define MSG_WAITFORONE 0x10000
3340 #endif
3341 
3342 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3343                                 unsigned int vlen, unsigned int flags,
3344                                 int send)
3345 {
3346     struct target_mmsghdr *mmsgp;
3347     abi_long ret = 0;
3348     int i;
3349 
3350     if (vlen > UIO_MAXIOV) {
3351         vlen = UIO_MAXIOV;
3352     }
3353 
3354     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3355     if (!mmsgp) {
3356         return -TARGET_EFAULT;
3357     }
3358 
3359     for (i = 0; i < vlen; i++) {
3360         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3361         if (is_error(ret)) {
3362             break;
3363         }
3364         mmsgp[i].msg_len = tswap32(ret);
3365         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3366         if (flags & MSG_WAITFORONE) {
3367             flags |= MSG_DONTWAIT;
3368         }
3369     }
3370 
3371     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3372 
3373     /* Return number of datagrams sent if we sent any at all;
3374      * otherwise return the error.
3375      */
3376     if (i) {
3377         return i;
3378     }
3379     return ret;
3380 }
3381 
3382 /* do_accept4() Must return target values and target errnos. */
3383 static abi_long do_accept4(int fd, abi_ulong target_addr,
3384                            abi_ulong target_addrlen_addr, int flags)
3385 {
3386     socklen_t addrlen, ret_addrlen;
3387     void *addr;
3388     abi_long ret;
3389     int host_flags;
3390 
3391     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3392 
3393     if (target_addr == 0) {
3394         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3395     }
3396 
3397     /* linux returns EFAULT if addrlen pointer is invalid */
3398     if (get_user_u32(addrlen, target_addrlen_addr))
3399         return -TARGET_EFAULT;
3400 
3401     if ((int)addrlen < 0) {
3402         return -TARGET_EINVAL;
3403     }
3404 
3405     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3406         return -TARGET_EFAULT;
3407     }
3408 
3409     addr = alloca(addrlen);
3410 
3411     ret_addrlen = addrlen;
3412     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3413     if (!is_error(ret)) {
3414         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3415         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3416             ret = -TARGET_EFAULT;
3417         }
3418     }
3419     return ret;
3420 }
3421 
3422 /* do_getpeername() Must return target values and target errnos. */
3423 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3424                                abi_ulong target_addrlen_addr)
3425 {
3426     socklen_t addrlen, ret_addrlen;
3427     void *addr;
3428     abi_long ret;
3429 
3430     if (get_user_u32(addrlen, target_addrlen_addr))
3431         return -TARGET_EFAULT;
3432 
3433     if ((int)addrlen < 0) {
3434         return -TARGET_EINVAL;
3435     }
3436 
3437     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3438         return -TARGET_EFAULT;
3439     }
3440 
3441     addr = alloca(addrlen);
3442 
3443     ret_addrlen = addrlen;
3444     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3445     if (!is_error(ret)) {
3446         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3447         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3448             ret = -TARGET_EFAULT;
3449         }
3450     }
3451     return ret;
3452 }
3453 
3454 /* do_getsockname() Must return target values and target errnos. */
3455 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3456                                abi_ulong target_addrlen_addr)
3457 {
3458     socklen_t addrlen, ret_addrlen;
3459     void *addr;
3460     abi_long ret;
3461 
3462     if (get_user_u32(addrlen, target_addrlen_addr))
3463         return -TARGET_EFAULT;
3464 
3465     if ((int)addrlen < 0) {
3466         return -TARGET_EINVAL;
3467     }
3468 
3469     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3470         return -TARGET_EFAULT;
3471     }
3472 
3473     addr = alloca(addrlen);
3474 
3475     ret_addrlen = addrlen;
3476     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3477     if (!is_error(ret)) {
3478         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3479         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3480             ret = -TARGET_EFAULT;
3481         }
3482     }
3483     return ret;
3484 }
3485 
3486 /* do_socketpair() Must return target values and target errnos. */
3487 static abi_long do_socketpair(int domain, int type, int protocol,
3488                               abi_ulong target_tab_addr)
3489 {
3490     int tab[2];
3491     abi_long ret;
3492 
3493     target_to_host_sock_type(&type);
3494 
3495     ret = get_errno(socketpair(domain, type, protocol, tab));
3496     if (!is_error(ret)) {
3497         if (put_user_s32(tab[0], target_tab_addr)
3498             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3499             ret = -TARGET_EFAULT;
3500     }
3501     return ret;
3502 }
3503 
3504 /* do_sendto() Must return target values and target errnos. */
3505 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3506                           abi_ulong target_addr, socklen_t addrlen)
3507 {
3508     void *addr;
3509     void *host_msg;
3510     void *copy_msg = NULL;
3511     abi_long ret;
3512 
3513     if ((int)addrlen < 0) {
3514         return -TARGET_EINVAL;
3515     }
3516 
3517     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3518     if (!host_msg)
3519         return -TARGET_EFAULT;
3520     if (fd_trans_target_to_host_data(fd)) {
3521         copy_msg = host_msg;
3522         host_msg = g_malloc(len);
3523         memcpy(host_msg, copy_msg, len);
3524         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3525         if (ret < 0) {
3526             goto fail;
3527         }
3528     }
3529     if (target_addr) {
3530         addr = alloca(addrlen+1);
3531         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3532         if (ret) {
3533             goto fail;
3534         }
3535         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3536     } else {
3537         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3538     }
3539 fail:
3540     if (copy_msg) {
3541         g_free(host_msg);
3542         host_msg = copy_msg;
3543     }
3544     unlock_user(host_msg, msg, 0);
3545     return ret;
3546 }
3547 
3548 /* do_recvfrom() Must return target values and target errnos. */
3549 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3550                             abi_ulong target_addr,
3551                             abi_ulong target_addrlen)
3552 {
3553     socklen_t addrlen, ret_addrlen;
3554     void *addr;
3555     void *host_msg;
3556     abi_long ret;
3557 
3558     if (!msg) {
3559         host_msg = NULL;
3560     } else {
3561         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3562         if (!host_msg) {
3563             return -TARGET_EFAULT;
3564         }
3565     }
3566     if (target_addr) {
3567         if (get_user_u32(addrlen, target_addrlen)) {
3568             ret = -TARGET_EFAULT;
3569             goto fail;
3570         }
3571         if ((int)addrlen < 0) {
3572             ret = -TARGET_EINVAL;
3573             goto fail;
3574         }
3575         addr = alloca(addrlen);
3576         ret_addrlen = addrlen;
3577         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3578                                       addr, &ret_addrlen));
3579     } else {
3580         addr = NULL; /* To keep compiler quiet.  */
3581         addrlen = 0; /* To keep compiler quiet.  */
3582         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3583     }
3584     if (!is_error(ret)) {
3585         if (fd_trans_host_to_target_data(fd)) {
3586             abi_long trans;
3587             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3588             if (is_error(trans)) {
3589                 ret = trans;
3590                 goto fail;
3591             }
3592         }
3593         if (target_addr) {
3594             host_to_target_sockaddr(target_addr, addr,
3595                                     MIN(addrlen, ret_addrlen));
3596             if (put_user_u32(ret_addrlen, target_addrlen)) {
3597                 ret = -TARGET_EFAULT;
3598                 goto fail;
3599             }
3600         }
3601         unlock_user(host_msg, msg, len);
3602     } else {
3603 fail:
3604         unlock_user(host_msg, msg, 0);
3605     }
3606     return ret;
3607 }
3608 
3609 #ifdef TARGET_NR_socketcall
3610 /* do_socketcall() must return target values and target errnos. */
3611 static abi_long do_socketcall(int num, abi_ulong vptr)
3612 {
3613     static const unsigned nargs[] = { /* number of arguments per operation */
3614         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3615         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3616         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3617         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3618         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3619         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3620         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3621         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3622         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3623         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3624         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3625         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3626         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3627         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3628         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3629         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3630         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3631         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3632         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3633         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3634     };
3635     abi_long a[6]; /* max 6 args */
3636     unsigned i;
3637 
3638     /* check the range of the first argument num */
3639     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3640     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3641         return -TARGET_EINVAL;
3642     }
3643     /* ensure we have space for args */
3644     if (nargs[num] > ARRAY_SIZE(a)) {
3645         return -TARGET_EINVAL;
3646     }
3647     /* collect the arguments in a[] according to nargs[] */
3648     for (i = 0; i < nargs[num]; ++i) {
3649         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3650             return -TARGET_EFAULT;
3651         }
3652     }
3653     /* now that we have the args, invoke the appropriate underlying function */
3654     switch (num) {
3655     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3656         return do_socket(a[0], a[1], a[2]);
3657     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3658         return do_bind(a[0], a[1], a[2]);
3659     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3660         return do_connect(a[0], a[1], a[2]);
3661     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3662         return get_errno(listen(a[0], a[1]));
3663     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3664         return do_accept4(a[0], a[1], a[2], 0);
3665     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3666         return do_getsockname(a[0], a[1], a[2]);
3667     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3668         return do_getpeername(a[0], a[1], a[2]);
3669     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3670         return do_socketpair(a[0], a[1], a[2], a[3]);
3671     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3672         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3673     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3674         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3675     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3676         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3677     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3678         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3679     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3680         return get_errno(shutdown(a[0], a[1]));
3681     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3682         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3683     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3684         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3685     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3686         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3687     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3688         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3689     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3690         return do_accept4(a[0], a[1], a[2], a[3]);
3691     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3692         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3693     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3694         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3695     default:
3696         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3697         return -TARGET_EINVAL;
3698     }
3699 }
3700 #endif
3701 
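/*
 * Worked example of the dispatch above: on targets that funnel the socket
 * API through a single socketcall syscall (32-bit x86 guests, for instance),
 * a guest connect(sockfd, addr, addrlen) arrives here as
 *
 *     num  == TARGET_SYS_CONNECT
 *     vptr --> { sockfd, addr, addrlen }   (three abi_longs in guest memory)
 *
 * so do_socketcall() copies nargs[TARGET_SYS_CONNECT] == 3 arguments into a[]
 * with get_user_ual() and hands them to do_connect().
 */
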
3702 #define N_SHM_REGIONS	32
3703 
3704 static struct shm_region {
3705     abi_ulong start;
3706     abi_ulong size;
3707     bool in_use;
3708 } shm_regions[N_SHM_REGIONS];
3709 
3710 #ifndef TARGET_SEMID64_DS
3711 /* asm-generic version of this struct */
3712 struct target_semid64_ds
3713 {
3714   struct target_ipc_perm sem_perm;
3715   abi_ulong sem_otime;
3716 #if TARGET_ABI_BITS == 32
3717   abi_ulong __unused1;
3718 #endif
3719   abi_ulong sem_ctime;
3720 #if TARGET_ABI_BITS == 32
3721   abi_ulong __unused2;
3722 #endif
3723   abi_ulong sem_nsems;
3724   abi_ulong __unused3;
3725   abi_ulong __unused4;
3726 };
3727 #endif
3728 
3729 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3730                                                abi_ulong target_addr)
3731 {
3732     struct target_ipc_perm *target_ip;
3733     struct target_semid64_ds *target_sd;
3734 
3735     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3736         return -TARGET_EFAULT;
3737     target_ip = &(target_sd->sem_perm);
3738     host_ip->__key = tswap32(target_ip->__key);
3739     host_ip->uid = tswap32(target_ip->uid);
3740     host_ip->gid = tswap32(target_ip->gid);
3741     host_ip->cuid = tswap32(target_ip->cuid);
3742     host_ip->cgid = tswap32(target_ip->cgid);
3743 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3744     host_ip->mode = tswap32(target_ip->mode);
3745 #else
3746     host_ip->mode = tswap16(target_ip->mode);
3747 #endif
3748 #if defined(TARGET_PPC)
3749     host_ip->__seq = tswap32(target_ip->__seq);
3750 #else
3751     host_ip->__seq = tswap16(target_ip->__seq);
3752 #endif
3753     unlock_user_struct(target_sd, target_addr, 0);
3754     return 0;
3755 }
3756 
3757 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3758                                                struct ipc_perm *host_ip)
3759 {
3760     struct target_ipc_perm *target_ip;
3761     struct target_semid64_ds *target_sd;
3762 
3763     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3764         return -TARGET_EFAULT;
3765     target_ip = &(target_sd->sem_perm);
3766     target_ip->__key = tswap32(host_ip->__key);
3767     target_ip->uid = tswap32(host_ip->uid);
3768     target_ip->gid = tswap32(host_ip->gid);
3769     target_ip->cuid = tswap32(host_ip->cuid);
3770     target_ip->cgid = tswap32(host_ip->cgid);
3771 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3772     target_ip->mode = tswap32(host_ip->mode);
3773 #else
3774     target_ip->mode = tswap16(host_ip->mode);
3775 #endif
3776 #if defined(TARGET_PPC)
3777     target_ip->__seq = tswap32(host_ip->__seq);
3778 #else
3779     target_ip->__seq = tswap16(host_ip->__seq);
3780 #endif
3781     unlock_user_struct(target_sd, target_addr, 1);
3782     return 0;
3783 }
3784 
3785 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3786                                                abi_ulong target_addr)
3787 {
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3791         return -TARGET_EFAULT;
3792     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3793         return -TARGET_EFAULT;
3794     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3795     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3796     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3797     unlock_user_struct(target_sd, target_addr, 0);
3798     return 0;
3799 }
3800 
3801 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3802                                                struct semid_ds *host_sd)
3803 {
3804     struct target_semid64_ds *target_sd;
3805 
3806     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3807         return -TARGET_EFAULT;
3808     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3809         return -TARGET_EFAULT;
3810     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3811     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3812     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3813     unlock_user_struct(target_sd, target_addr, 1);
3814     return 0;
3815 }
3816 
3817 struct target_seminfo {
3818     int semmap;
3819     int semmni;
3820     int semmns;
3821     int semmnu;
3822     int semmsl;
3823     int semopm;
3824     int semume;
3825     int semusz;
3826     int semvmx;
3827     int semaem;
3828 };
3829 
3830 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3831                                               struct seminfo *host_seminfo)
3832 {
3833     struct target_seminfo *target_seminfo;
3834     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3835         return -TARGET_EFAULT;
3836     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3837     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3838     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3839     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3840     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3841     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3842     __put_user(host_seminfo->semume, &target_seminfo->semume);
3843     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3844     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3845     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3846     unlock_user_struct(target_seminfo, target_addr, 1);
3847     return 0;
3848 }
3849 
3850 union semun {
3851 	int val;
3852 	struct semid_ds *buf;
3853 	unsigned short *array;
3854 	struct seminfo *__buf;
3855 };
3856 
3857 union target_semun {
3858 	int val;
3859 	abi_ulong buf;
3860 	abi_ulong array;
3861 	abi_ulong __buf;
3862 };
3863 
3864 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3865                                                abi_ulong target_addr)
3866 {
3867     int nsems;
3868     unsigned short *array;
3869     union semun semun;
3870     struct semid_ds semid_ds;
3871     int i, ret;
3872 
3873     semun.buf = &semid_ds;
3874 
3875     ret = semctl(semid, 0, IPC_STAT, semun);
3876     if (ret == -1)
3877         return get_errno(ret);
3878 
3879     nsems = semid_ds.sem_nsems;
3880 
3881     *host_array = g_try_new(unsigned short, nsems);
3882     if (!*host_array) {
3883         return -TARGET_ENOMEM;
3884     }
3885     array = lock_user(VERIFY_READ, target_addr,
3886                       nsems*sizeof(unsigned short), 1);
3887     if (!array) {
3888         g_free(*host_array);
3889         return -TARGET_EFAULT;
3890     }
3891 
3892     for(i=0; i<nsems; i++) {
3893         __get_user((*host_array)[i], &array[i]);
3894     }
3895     unlock_user(array, target_addr, 0);
3896 
3897     return 0;
3898 }
3899 
3900 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3901                                                unsigned short **host_array)
3902 {
3903     int nsems;
3904     unsigned short *array;
3905     union semun semun;
3906     struct semid_ds semid_ds;
3907     int i, ret;
3908 
3909     semun.buf = &semid_ds;
3910 
3911     ret = semctl(semid, 0, IPC_STAT, semun);
3912     if (ret == -1)
3913         return get_errno(ret);
3914 
3915     nsems = semid_ds.sem_nsems;
3916 
3917     array = lock_user(VERIFY_WRITE, target_addr,
3918                       nsems*sizeof(unsigned short), 0);
3919     if (!array)
3920         return -TARGET_EFAULT;
3921 
3922     for(i=0; i<nsems; i++) {
3923         __put_user((*host_array)[i], &array[i]);
3924     }
3925     g_free(*host_array);
3926     unlock_user(array, target_addr, 1);
3927 
3928     return 0;
3929 }
3930 
3931 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3932                                  abi_ulong target_arg)
3933 {
3934     union target_semun target_su = { .buf = target_arg };
3935     union semun arg;
3936     struct semid_ds dsarg;
3937     unsigned short *array = NULL;
3938     struct seminfo seminfo;
3939     abi_long ret = -TARGET_EINVAL;
3940     abi_long err;
3941     cmd &= 0xff;
3942 
3943     switch( cmd ) {
3944 	case GETVAL:
3945 	case SETVAL:
3946             /* In 64 bit cross-endian situations, we will erroneously pick up
3947              * the wrong half of the union for the "val" element.  To rectify
3948              * this, the entire 8-byte structure is byteswapped, followed by
3949 	     * a swap of the 4 byte val field. In other cases, the data is
3950 	     * already in proper host byte order. */
3951 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3952 		target_su.buf = tswapal(target_su.buf);
3953 		arg.val = tswap32(target_su.val);
3954 	    } else {
3955 		arg.val = target_su.val;
3956 	    }
3957             ret = get_errno(semctl(semid, semnum, cmd, arg));
3958             break;
3959 	case GETALL:
3960 	case SETALL:
3961             err = target_to_host_semarray(semid, &array, target_su.array);
3962             if (err)
3963                 return err;
3964             arg.array = array;
3965             ret = get_errno(semctl(semid, semnum, cmd, arg));
3966             err = host_to_target_semarray(semid, target_su.array, &array);
3967             if (err)
3968                 return err;
3969             break;
3970 	case IPC_STAT:
3971 	case IPC_SET:
3972 	case SEM_STAT:
3973             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3974             if (err)
3975                 return err;
3976             arg.buf = &dsarg;
3977             ret = get_errno(semctl(semid, semnum, cmd, arg));
3978             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3979             if (err)
3980                 return err;
3981             break;
3982 	case IPC_INFO:
3983 	case SEM_INFO:
3984             arg.__buf = &seminfo;
3985             ret = get_errno(semctl(semid, semnum, cmd, arg));
3986             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3987             if (err)
3988                 return err;
3989             break;
3990 	case IPC_RMID:
3991 	case GETPID:
3992 	case GETNCNT:
3993 	case GETZCNT:
3994             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3995             break;
3996     }
3997 
3998     return ret;
3999 }
4000 
4001 struct target_sembuf {
4002     unsigned short sem_num;
4003     short sem_op;
4004     short sem_flg;
4005 };
4006 
4007 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4008                                              abi_ulong target_addr,
4009                                              unsigned nsops)
4010 {
4011     struct target_sembuf *target_sembuf;
4012     int i;
4013 
4014     target_sembuf = lock_user(VERIFY_READ, target_addr,
4015                               nsops*sizeof(struct target_sembuf), 1);
4016     if (!target_sembuf)
4017         return -TARGET_EFAULT;
4018 
4019     for(i=0; i<nsops; i++) {
4020         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4021         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4022         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4023     }
4024 
4025     unlock_user(target_sembuf, target_addr, 0);
4026 
4027     return 0;
4028 }
4029 
4030 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4031     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4032 
4033 /*
4034  * This macro is required to handle the s390 variant, which passes the
4035  * arguments in a different order than the default.
4036  */
4037 #ifdef __s390x__
4038 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4039   (__nsops), (__timeout), (__sops)
4040 #else
4041 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4042   (__nsops), 0, (__sops), (__timeout)
4043 #endif
4044 
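/*
 * For reference, the safe_ipc() fallback in do_semtimedop() below therefore
 * expands to
 *
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts)
 *
 * with the generic argument order, and to
 *
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops)
 *
 * on s390x, matching its five-parameter sys_ipc calling convention.
 */
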
4045 static inline abi_long do_semtimedop(int semid,
4046                                      abi_long ptr,
4047                                      unsigned nsops,
4048                                      abi_long timeout, bool time64)
4049 {
4050     struct sembuf *sops;
4051     struct timespec ts, *pts = NULL;
4052     abi_long ret;
4053 
4054     if (timeout) {
4055         pts = &ts;
4056         if (time64) {
4057             if (target_to_host_timespec64(pts, timeout)) {
4058                 return -TARGET_EFAULT;
4059             }
4060         } else {
4061             if (target_to_host_timespec(pts, timeout)) {
4062                 return -TARGET_EFAULT;
4063             }
4064         }
4065     }
4066 
4067     if (nsops > TARGET_SEMOPM) {
4068         return -TARGET_E2BIG;
4069     }
4070 
4071     sops = g_new(struct sembuf, nsops);
4072 
4073     if (target_to_host_sembuf(sops, ptr, nsops)) {
4074         g_free(sops);
4075         return -TARGET_EFAULT;
4076     }
4077 
4078     ret = -TARGET_ENOSYS;
4079 #ifdef __NR_semtimedop
4080     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4081 #endif
4082 #ifdef __NR_ipc
4083     if (ret == -TARGET_ENOSYS) {
4084         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4085                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4086     }
4087 #endif
4088     g_free(sops);
4089     return ret;
4090 }
4091 #endif
4092 
4093 struct target_msqid_ds
4094 {
4095     struct target_ipc_perm msg_perm;
4096     abi_ulong msg_stime;
4097 #if TARGET_ABI_BITS == 32
4098     abi_ulong __unused1;
4099 #endif
4100     abi_ulong msg_rtime;
4101 #if TARGET_ABI_BITS == 32
4102     abi_ulong __unused2;
4103 #endif
4104     abi_ulong msg_ctime;
4105 #if TARGET_ABI_BITS == 32
4106     abi_ulong __unused3;
4107 #endif
4108     abi_ulong __msg_cbytes;
4109     abi_ulong msg_qnum;
4110     abi_ulong msg_qbytes;
4111     abi_ulong msg_lspid;
4112     abi_ulong msg_lrpid;
4113     abi_ulong __unused4;
4114     abi_ulong __unused5;
4115 };
4116 
4117 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4118                                                abi_ulong target_addr)
4119 {
4120     struct target_msqid_ds *target_md;
4121 
4122     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4123         return -TARGET_EFAULT;
4124     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4125         return -TARGET_EFAULT;
4126     host_md->msg_stime = tswapal(target_md->msg_stime);
4127     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4128     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4129     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4130     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4131     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4132     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4133     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4134     unlock_user_struct(target_md, target_addr, 0);
4135     return 0;
4136 }
4137 
4138 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4139                                                struct msqid_ds *host_md)
4140 {
4141     struct target_msqid_ds *target_md;
4142 
4143     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4144         return -TARGET_EFAULT;
4145     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4146         return -TARGET_EFAULT;
4147     target_md->msg_stime = tswapal(host_md->msg_stime);
4148     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4149     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4150     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4151     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4152     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4153     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4154     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4155     unlock_user_struct(target_md, target_addr, 1);
4156     return 0;
4157 }
4158 
4159 struct target_msginfo {
4160     int msgpool;
4161     int msgmap;
4162     int msgmax;
4163     int msgmnb;
4164     int msgmni;
4165     int msgssz;
4166     int msgtql;
4167     unsigned short int msgseg;
4168 };
4169 
4170 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4171                                               struct msginfo *host_msginfo)
4172 {
4173     struct target_msginfo *target_msginfo;
4174     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4175         return -TARGET_EFAULT;
4176     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4177     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4178     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4179     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4180     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4181     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4182     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4183     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4184     unlock_user_struct(target_msginfo, target_addr, 1);
4185     return 0;
4186 }
4187 
4188 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4189 {
4190     struct msqid_ds dsarg;
4191     struct msginfo msginfo;
4192     abi_long ret = -TARGET_EINVAL;
4193 
4194     cmd &= 0xff;
4195 
4196     switch (cmd) {
4197     case IPC_STAT:
4198     case IPC_SET:
4199     case MSG_STAT:
4200         if (target_to_host_msqid_ds(&dsarg,ptr))
4201             return -TARGET_EFAULT;
4202         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4203         if (host_to_target_msqid_ds(ptr,&dsarg))
4204             return -TARGET_EFAULT;
4205         break;
4206     case IPC_RMID:
4207         ret = get_errno(msgctl(msgid, cmd, NULL));
4208         break;
4209     case IPC_INFO:
4210     case MSG_INFO:
4211         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4212         if (host_to_target_msginfo(ptr, &msginfo))
4213             return -TARGET_EFAULT;
4214         break;
4215     }
4216 
4217     return ret;
4218 }
4219 
4220 struct target_msgbuf {
4221     abi_long mtype;
4222     char	mtext[1];
4223 };
4224 
4225 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4226                                  ssize_t msgsz, int msgflg)
4227 {
4228     struct target_msgbuf *target_mb;
4229     struct msgbuf *host_mb;
4230     abi_long ret = 0;
4231 
4232     if (msgsz < 0) {
4233         return -TARGET_EINVAL;
4234     }
4235 
4236     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4237         return -TARGET_EFAULT;
4238     host_mb = g_try_malloc(msgsz + sizeof(long));
4239     if (!host_mb) {
4240         unlock_user_struct(target_mb, msgp, 0);
4241         return -TARGET_ENOMEM;
4242     }
4243     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4244     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4245     ret = -TARGET_ENOSYS;
4246 #ifdef __NR_msgsnd
4247     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4248 #endif
4249 #ifdef __NR_ipc
4250     if (ret == -TARGET_ENOSYS) {
4251 #ifdef __s390x__
4252         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4253                                  host_mb));
4254 #else
4255         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4256                                  host_mb, 0));
4257 #endif
4258     }
4259 #endif
4260     g_free(host_mb);
4261     unlock_user_struct(target_mb, msgp, 0);
4262 
4263     return ret;
4264 }
4265 
4266 #ifdef __NR_ipc
4267 #if defined(__sparc__)
4268 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4269 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4270 #elif defined(__s390x__)
4271 /* The s390 sys_ipc variant has only five parameters.  */
4272 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4273     ((long int[]){(long int)__msgp, __msgtyp})
4274 #else
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4276     ((long int[]){(long int)__msgp, __msgtyp}), 0
4277 #endif
4278 #endif
4279 
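/*
 * For reference, with the default MSGRCV_ARGS layout the safe_ipc() fallback
 * in do_msgrcv() below expands to roughly
 *
 *     safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz, msgflg,
 *              (long int[]){ (long int)host_mb, msgtyp }, 0)
 *
 * i.e. msgp and msgtyp are packed into a small on-stack array (the historical
 * "ipc kludge"); SPARC instead passes them as two plain arguments, and the
 * five-parameter s390 variant omits the trailing 0.
 */
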
4280 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4281                                  ssize_t msgsz, abi_long msgtyp,
4282                                  int msgflg)
4283 {
4284     struct target_msgbuf *target_mb;
4285     char *target_mtext;
4286     struct msgbuf *host_mb;
4287     abi_long ret = 0;
4288 
4289     if (msgsz < 0) {
4290         return -TARGET_EINVAL;
4291     }
4292 
4293     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4294         return -TARGET_EFAULT;
4295 
4296     host_mb = g_try_malloc(msgsz + sizeof(long));
4297     if (!host_mb) {
4298         ret = -TARGET_ENOMEM;
4299         goto end;
4300     }
4301     ret = -TARGET_ENOSYS;
4302 #ifdef __NR_msgrcv
4303     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4304 #endif
4305 #ifdef __NR_ipc
4306     if (ret == -TARGET_ENOSYS) {
4307         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4308                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4309     }
4310 #endif
4311 
4312     if (ret > 0) {
4313         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4314         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4315         if (!target_mtext) {
4316             ret = -TARGET_EFAULT;
4317             goto end;
4318         }
4319         memcpy(target_mb->mtext, host_mb->mtext, ret);
4320         unlock_user(target_mtext, target_mtext_addr, ret);
4321     }
4322 
4323     target_mb->mtype = tswapal(host_mb->mtype);
4324 
4325 end:
4326     if (target_mb)
4327         unlock_user_struct(target_mb, msgp, 1);
4328     g_free(host_mb);
4329     return ret;
4330 }
4331 
4332 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4333                                                abi_ulong target_addr)
4334 {
4335     struct target_shmid_ds *target_sd;
4336 
4337     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4338         return -TARGET_EFAULT;
4339     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4340         return -TARGET_EFAULT;
4341     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4342     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4343     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4344     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4345     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4346     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4347     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4348     unlock_user_struct(target_sd, target_addr, 0);
4349     return 0;
4350 }
4351 
4352 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4353                                                struct shmid_ds *host_sd)
4354 {
4355     struct target_shmid_ds *target_sd;
4356 
4357     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4358         return -TARGET_EFAULT;
4359     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4360         return -TARGET_EFAULT;
4361     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4362     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4363     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4364     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4365     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4366     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4367     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4368     unlock_user_struct(target_sd, target_addr, 1);
4369     return 0;
4370 }
4371 
4372 struct  target_shminfo {
4373     abi_ulong shmmax;
4374     abi_ulong shmmin;
4375     abi_ulong shmmni;
4376     abi_ulong shmseg;
4377     abi_ulong shmall;
4378 };
4379 
4380 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4381                                               struct shminfo *host_shminfo)
4382 {
4383     struct target_shminfo *target_shminfo;
4384     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4387     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4388     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4389     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4390     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4391     unlock_user_struct(target_shminfo, target_addr, 1);
4392     return 0;
4393 }
4394 
4395 struct target_shm_info {
4396     int used_ids;
4397     abi_ulong shm_tot;
4398     abi_ulong shm_rss;
4399     abi_ulong shm_swp;
4400     abi_ulong swap_attempts;
4401     abi_ulong swap_successes;
4402 };
4403 
4404 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4405                                                struct shm_info *host_shm_info)
4406 {
4407     struct target_shm_info *target_shm_info;
4408     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4409         return -TARGET_EFAULT;
4410     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4411     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4412     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4413     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4414     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4415     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4416     unlock_user_struct(target_shm_info, target_addr, 1);
4417     return 0;
4418 }
4419 
4420 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4421 {
4422     struct shmid_ds dsarg;
4423     struct shminfo shminfo;
4424     struct shm_info shm_info;
4425     abi_long ret = -TARGET_EINVAL;
4426 
4427     cmd &= 0xff;
4428 
4429     switch(cmd) {
4430     case IPC_STAT:
4431     case IPC_SET:
4432     case SHM_STAT:
4433         if (target_to_host_shmid_ds(&dsarg, buf))
4434             return -TARGET_EFAULT;
4435         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4436         if (host_to_target_shmid_ds(buf, &dsarg))
4437             return -TARGET_EFAULT;
4438         break;
4439     case IPC_INFO:
4440         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4441         if (host_to_target_shminfo(buf, &shminfo))
4442             return -TARGET_EFAULT;
4443         break;
4444     case SHM_INFO:
4445         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4446         if (host_to_target_shm_info(buf, &shm_info))
4447             return -TARGET_EFAULT;
4448         break;
4449     case IPC_RMID:
4450     case SHM_LOCK:
4451     case SHM_UNLOCK:
4452         ret = get_errno(shmctl(shmid, cmd, NULL));
4453         break;
4454     }
4455 
4456     return ret;
4457 }
4458 
4459 #ifndef TARGET_FORCE_SHMLBA
4460 /* For most architectures, SHMLBA is the same as the page size;
4461  * some architectures have larger values, in which case they should
4462  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4463  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4464  * and defining its own value for SHMLBA.
4465  *
4466  * The kernel also permits SHMLBA to be set by the architecture to a
4467  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4468  * this means that addresses are rounded to the large size if
4469  * SHM_RND is set but addresses not aligned to that size are not rejected
4470  * as long as they are at least page-aligned. Since the only architecture
4471  * which uses this is ia64, this code doesn't provide for that oddity.
4472  */
4473 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4474 {
4475     return TARGET_PAGE_SIZE;
4476 }
4477 #endif
4478 
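/*
 * Sketch of the override described above (illustrative only; the values and
 * the exact header it lives in are hypothetical): an architecture whose
 * kernel uses an SHMLBA of four pages would define TARGET_FORCE_SHMLBA in
 * its target headers and provide something like
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 *
 * so that do_shmat() below rounds (SHM_RND) or rejects attach addresses
 * against that larger alignment.
 */
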
4479 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4480                                  int shmid, abi_ulong shmaddr, int shmflg)
4481 {
4482     CPUState *cpu = env_cpu(cpu_env);
4483     abi_long raddr;
4484     void *host_raddr;
4485     struct shmid_ds shm_info;
4486     int i,ret;
4487     abi_ulong shmlba;
4488 
4489     /* shmat pointers are always untagged */
4490 
4491     /* find out the length of the shared memory segment */
4492     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4493     if (is_error(ret)) {
4494         /* can't get length, bail out */
4495         return ret;
4496     }
4497 
4498     shmlba = target_shmlba(cpu_env);
4499 
4500     if (shmaddr & (shmlba - 1)) {
4501         if (shmflg & SHM_RND) {
4502             shmaddr &= ~(shmlba - 1);
4503         } else {
4504             return -TARGET_EINVAL;
4505         }
4506     }
4507     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4508         return -TARGET_EINVAL;
4509     }
4510 
4511     mmap_lock();
4512 
4513     /*
4514      * We're mapping shared memory, so ensure we generate code for parallel
4515      * execution and flush old translations.  This will work up to the level
4516      * supported by the host -- anything that requires EXCP_ATOMIC will not
4517      * be atomic with respect to an external process.
4518      */
4519     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4520         cpu->tcg_cflags |= CF_PARALLEL;
4521         tb_flush(cpu);
4522     }
4523 
4524     if (shmaddr)
4525         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4526     else {
4527         abi_ulong mmap_start;
4528 
4529         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4530         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4531 
4532         if (mmap_start == -1) {
4533             errno = ENOMEM;
4534             host_raddr = (void *)-1;
4535         } else
4536             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4537                                shmflg | SHM_REMAP);
4538     }
4539 
4540     if (host_raddr == (void *)-1) {
4541         mmap_unlock();
4542         return get_errno((long)host_raddr);
4543     }
4544     raddr=h2g((unsigned long)host_raddr);
4545 
4546     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4547                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4548                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4549 
4550     for (i = 0; i < N_SHM_REGIONS; i++) {
4551         if (!shm_regions[i].in_use) {
4552             shm_regions[i].in_use = true;
4553             shm_regions[i].start = raddr;
4554             shm_regions[i].size = shm_info.shm_segsz;
4555             break;
4556         }
4557     }
4558 
4559     mmap_unlock();
4560     return raddr;
4561 
4562 }
4563 
4564 static inline abi_long do_shmdt(abi_ulong shmaddr)
4565 {
4566     int i;
4567     abi_long rv;
4568 
4569     /* shmdt pointers are always untagged */
4570 
4571     mmap_lock();
4572 
4573     for (i = 0; i < N_SHM_REGIONS; ++i) {
4574         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4575             shm_regions[i].in_use = false;
4576             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4577             break;
4578         }
4579     }
4580     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4581 
4582     mmap_unlock();
4583 
4584     return rv;
4585 }
4586 
4587 #ifdef TARGET_NR_ipc
4588 /* ??? This only works with linear mappings.  */
4589 /* do_ipc() must return target values and target errnos. */
4590 static abi_long do_ipc(CPUArchState *cpu_env,
4591                        unsigned int call, abi_long first,
4592                        abi_long second, abi_long third,
4593                        abi_long ptr, abi_long fifth)
4594 {
4595     int version;
4596     abi_long ret = 0;
4597 
4598     version = call >> 16;
4599     call &= 0xffff;
4600 
4601     switch (call) {
4602     case IPCOP_semop:
4603         ret = do_semtimedop(first, ptr, second, 0, false);
4604         break;
4605     case IPCOP_semtimedop:
4606     /*
4607      * The s390 sys_ipc variant has only five parameters instead of six
4608      * (as in the default variant). The only difference is the handling
4609      * of SEMTIMEDOP, where on s390 the third parameter is a pointer to a
4610      * struct timespec, while the generic variant uses the fifth parameter.
4611      */
4612 #if defined(TARGET_S390X)
4613         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4614 #else
4615         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4616 #endif
4617         break;
4618 
4619     case IPCOP_semget:
4620         ret = get_errno(semget(first, second, third));
4621         break;
4622 
4623     case IPCOP_semctl: {
4624         /* The semun argument to semctl is passed by value, so dereference the
4625          * ptr argument. */
4626         abi_ulong atptr;
4627         get_user_ual(atptr, ptr);
4628         ret = do_semctl(first, second, third, atptr);
4629         break;
4630     }
4631 
4632     case IPCOP_msgget:
4633         ret = get_errno(msgget(first, second));
4634         break;
4635 
4636     case IPCOP_msgsnd:
4637         ret = do_msgsnd(first, ptr, second, third);
4638         break;
4639 
4640     case IPCOP_msgctl:
4641         ret = do_msgctl(first, second, ptr);
4642         break;
4643 
4644     case IPCOP_msgrcv:
4645         switch (version) {
4646         case 0:
4647             {
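                /*
                 * Old (version 0) ABI: ptr points at a { msgp, msgtyp }
                 * pair rather than directly at the message buffer.
                 */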
4648                 struct target_ipc_kludge {
4649                     abi_long msgp;
4650                     abi_long msgtyp;
4651                 } *tmp;
4652 
4653                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4654                     ret = -TARGET_EFAULT;
4655                     break;
4656                 }
4657 
4658                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4659 
4660                 unlock_user_struct(tmp, ptr, 0);
4661                 break;
4662             }
4663         default:
4664             ret = do_msgrcv(first, ptr, second, fifth, third);
4665         }
4666         break;
4667 
4668     case IPCOP_shmat:
4669         switch (version) {
4670         default:
4671         {
4672             abi_ulong raddr;
4673             raddr = do_shmat(cpu_env, first, ptr, second);
4674             if (is_error(raddr))
4675                 return get_errno(raddr);
4676             if (put_user_ual(raddr, third))
4677                 return -TARGET_EFAULT;
4678             break;
4679         }
4680         case 1:
4681             ret = -TARGET_EINVAL;
4682             break;
4683         }
4684         break;
4685     case IPCOP_shmdt:
4686         ret = do_shmdt(ptr);
4687         break;
4688 
4689     case IPCOP_shmget:
4690         /* IPC_* flag values are the same on all linux platforms */
4691         ret = get_errno(shmget(first, second, third));
4692         break;
4693 
4694         /* IPC_* and SHM_* command values are the same on all linux platforms */
4695     case IPCOP_shmctl:
4696         ret = do_shmctl(first, second, ptr);
4697         break;
4698     default:
4699         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4700                       call, version);
4701         ret = -TARGET_ENOSYS;
4702         break;
4703     }
4704     return ret;
4705 }
4706 #endif
4707 
4708 /* kernel structure types definitions */
4709 
4710 #define STRUCT(name, ...) STRUCT_ ## name,
4711 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4712 enum {
4713 #include "syscall_types.h"
4714 STRUCT_MAX
4715 };
4716 #undef STRUCT
4717 #undef STRUCT_SPECIAL
4718 
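/*
 * The first include above turns each STRUCT()/STRUCT_SPECIAL() entry in
 * syscall_types.h into a STRUCT_* enumerator; the second include below
 * expands STRUCT() again to emit the struct_<name>_def[] argtype arrays
 * consumed by the thunk conversion code.
 */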
4719 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4720 #define STRUCT_SPECIAL(name)
4721 #include "syscall_types.h"
4722 #undef STRUCT
4723 #undef STRUCT_SPECIAL
4724 
4725 #define MAX_STRUCT_SIZE 4096
4726 
4727 #ifdef CONFIG_FIEMAP
4728 /* So fiemap access checks don't overflow on 32 bit systems.
4729  * This is very slightly smaller than the limit imposed by
4730  * the underlying kernel.
4731  */
4732 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4733                             / sizeof(struct fiemap_extent))
4734 
4735 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4736                                        int fd, int cmd, abi_long arg)
4737 {
4738     /* The parameter for this ioctl is a struct fiemap followed
4739      * by an array of struct fiemap_extent whose size is set
4740      * in fiemap->fm_extent_count. The array is filled in by the
4741      * ioctl.
4742      */
4743     int target_size_in, target_size_out;
4744     struct fiemap *fm;
4745     const argtype *arg_type = ie->arg_type;
4746     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4747     void *argptr, *p;
4748     abi_long ret;
4749     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4750     uint32_t outbufsz;
4751     int free_fm = 0;
4752 
4753     assert(arg_type[0] == TYPE_PTR);
4754     assert(ie->access == IOC_RW);
4755     arg_type++;
4756     target_size_in = thunk_type_size(arg_type, 0);
4757     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4758     if (!argptr) {
4759         return -TARGET_EFAULT;
4760     }
4761     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4762     unlock_user(argptr, arg, 0);
4763     fm = (struct fiemap *)buf_temp;
4764     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4765         return -TARGET_EINVAL;
4766     }
4767 
4768     outbufsz = sizeof (*fm) +
4769         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4770 
4771     if (outbufsz > MAX_STRUCT_SIZE) {
4772         /* We can't fit all the extents into the fixed size buffer.
4773          * Allocate one that is large enough and use it instead.
4774          */
4775         fm = g_try_malloc(outbufsz);
4776         if (!fm) {
4777             return -TARGET_ENOMEM;
4778         }
4779         memcpy(fm, buf_temp, sizeof(struct fiemap));
4780         free_fm = 1;
4781     }
4782     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4783     if (!is_error(ret)) {
4784         target_size_out = target_size_in;
4785         /* An extent_count of 0 means we were only counting the extents
4786          * so there are no structs to copy
4787          */
4788         if (fm->fm_extent_count != 0) {
4789             target_size_out += fm->fm_mapped_extents * extent_size;
4790         }
4791         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4792         if (!argptr) {
4793             ret = -TARGET_EFAULT;
4794         } else {
4795             /* Convert the struct fiemap */
4796             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4797             if (fm->fm_extent_count != 0) {
4798                 p = argptr + target_size_in;
4799                 /* ...and then all the struct fiemap_extents */
4800                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4801                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4802                                   THUNK_TARGET);
4803                     p += extent_size;
4804                 }
4805             }
4806             unlock_user(argptr, arg, target_size_out);
4807         }
4808     }
4809     if (free_fm) {
4810         g_free(fm);
4811     }
4812     return ret;
4813 }
4814 #endif
4815 
4816 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4817                                 int fd, int cmd, abi_long arg)
4818 {
4819     const argtype *arg_type = ie->arg_type;
4820     int target_size;
4821     void *argptr;
4822     int ret;
4823     struct ifconf *host_ifconf;
4824     uint32_t outbufsz;
4825     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4826     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4827     int target_ifreq_size;
4828     int nb_ifreq;
4829     int free_buf = 0;
4830     int i;
4831     int target_ifc_len;
4832     abi_long target_ifc_buf;
4833     int host_ifc_len;
4834     char *host_ifc_buf;
4835 
4836     assert(arg_type[0] == TYPE_PTR);
4837     assert(ie->access == IOC_RW);
4838 
4839     arg_type++;
4840     target_size = thunk_type_size(arg_type, 0);
4841 
4842     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4843     if (!argptr)
4844         return -TARGET_EFAULT;
4845     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4846     unlock_user(argptr, arg, 0);
4847 
4848     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4849     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4850     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4851 
4852     if (target_ifc_buf != 0) {
4853         target_ifc_len = host_ifconf->ifc_len;
4854         nb_ifreq = target_ifc_len / target_ifreq_size;
4855         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4856 
4857         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4858         if (outbufsz > MAX_STRUCT_SIZE) {
4859             /*
4860              * We can't fit all the ifreq entries into the fixed size buffer.
4861              * Allocate one that is large enough and use it instead.
4862              */
4863             host_ifconf = malloc(outbufsz);
4864             if (!host_ifconf) {
4865                 return -TARGET_ENOMEM;
4866             }
4867             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4868             free_buf = 1;
4869         }
4870         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4871 
4872         host_ifconf->ifc_len = host_ifc_len;
4873     } else {
4874         host_ifc_buf = NULL;
4875     }
4876     host_ifconf->ifc_buf = host_ifc_buf;
4877 
4878     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4879     if (!is_error(ret)) {
4880         /* convert host ifc_len to target ifc_len */
4881 
4882         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4883         target_ifc_len = nb_ifreq * target_ifreq_size;
4884         host_ifconf->ifc_len = target_ifc_len;
4885 
4886         /* restore target ifc_buf */
4887 
4888         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4889 
4890         /* copy struct ifconf to target user */
4891 
4892         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4893         if (!argptr)
4894             return -TARGET_EFAULT;
4895         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4896         unlock_user(argptr, arg, target_size);
4897 
4898         if (target_ifc_buf != 0) {
4899             /* copy ifreq[] to target user */
4900             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                if (free_buf) {
                    free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
4901             for (i = 0; i < nb_ifreq ; i++) {
4902                 thunk_convert(argptr + i * target_ifreq_size,
4903                               host_ifc_buf + i * sizeof(struct ifreq),
4904                               ifreq_arg_type, THUNK_TARGET);
4905             }
4906             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4907         }
4908     }
4909 
4910     if (free_buf) {
4911         free(host_ifconf);
4912     }
4913 
4914     return ret;
4915 }
4916 
4917 #if defined(CONFIG_USBFS)
4918 #if HOST_LONG_BITS > 64
4919 #error USBDEVFS thunks do not support >64 bit hosts yet.
4920 #endif
4921 struct live_urb {
4922     uint64_t target_urb_adr;
4923     uint64_t target_buf_adr;
4924     char *target_buf_ptr;
4925     struct usbdevfs_urb host_urb;
4926 };
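/*
 * The URB hash table is keyed on the guest URB address: the live_urb
 * pointer itself serves as the key, which works because target_urb_adr
 * is the first member, so g_int64_hash()/g_int64_equal() read it
 * straight through the struct pointer.
 */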
4927 
4928 static GHashTable *usbdevfs_urb_hashtable(void)
4929 {
4930     static GHashTable *urb_hashtable;
4931 
4932     if (!urb_hashtable) {
4933         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4934     }
4935     return urb_hashtable;
4936 }
4937 
4938 static void urb_hashtable_insert(struct live_urb *urb)
4939 {
4940     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4941     g_hash_table_insert(urb_hashtable, urb, urb);
4942 }
4943 
4944 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4945 {
4946     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4947     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4948 }
4949 
4950 static void urb_hashtable_remove(struct live_urb *urb)
4951 {
4952     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4953     g_hash_table_remove(urb_hashtable, urb);
4954 }
4955 
4956 static abi_long
4957 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4958                           int fd, int cmd, abi_long arg)
4959 {
4960     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4961     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4962     struct live_urb *lurb;
4963     void *argptr;
4964     uint64_t hurb;
4965     int target_size;
4966     uintptr_t target_urb_adr;
4967     abi_long ret;
4968 
4969     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4970 
4971     memset(buf_temp, 0, sizeof(uint64_t));
4972     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4973     if (is_error(ret)) {
4974         return ret;
4975     }
4976 
4977     memcpy(&hurb, buf_temp, sizeof(uint64_t));
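    /*
     * The kernel returns the address of the host_urb we submitted;
     * step back by offsetof(struct live_urb, host_urb) to recover the
     * enclosing live_urb and its metadata.
     */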
4978     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4979     if (!lurb->target_urb_adr) {
4980         return -TARGET_EFAULT;
4981     }
4982     urb_hashtable_remove(lurb);
4983     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4984         lurb->host_urb.buffer_length);
4985     lurb->target_buf_ptr = NULL;
4986 
4987     /* restore the guest buffer pointer */
4988     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4989 
4990     /* update the guest urb struct */
4991     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4992     if (!argptr) {
4993         g_free(lurb);
4994         return -TARGET_EFAULT;
4995     }
4996     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4997     unlock_user(argptr, lurb->target_urb_adr, target_size);
4998 
4999     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5000     /* write back the urb handle */
5001     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5002     if (!argptr) {
5003         g_free(lurb);
5004         return -TARGET_EFAULT;
5005     }
5006 
5007     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5008     target_urb_adr = lurb->target_urb_adr;
5009     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5010     unlock_user(argptr, arg, target_size);
5011 
5012     g_free(lurb);
5013     return ret;
5014 }
5015 
5016 static abi_long
5017 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5018                              uint8_t *buf_temp __attribute__((unused)),
5019                              int fd, int cmd, abi_long arg)
5020 {
5021     struct live_urb *lurb;
5022 
5023     /* map target address back to host URB with metadata. */
5024     lurb = urb_hashtable_lookup(arg);
5025     if (!lurb) {
5026         return -TARGET_EFAULT;
5027     }
5028     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5029 }
5030 
5031 static abi_long
5032 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5033                             int fd, int cmd, abi_long arg)
5034 {
5035     const argtype *arg_type = ie->arg_type;
5036     int target_size;
5037     abi_long ret;
5038     void *argptr;
5039     int rw_dir;
5040     struct live_urb *lurb;
5041 
5042     /*
5043      * each submitted URB needs to map to a unique ID for the
5044      * kernel, and that unique ID needs to be a pointer to
5045      * host memory.  hence, we need to malloc for each URB.
5046      * isochronous transfers have a variable length struct.
5047      */
5048     arg_type++;
5049     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5050 
5051     /* construct host copy of urb and metadata */
5052     lurb = g_try_malloc0(sizeof(struct live_urb));
5053     if (!lurb) {
5054         return -TARGET_ENOMEM;
5055     }
5056 
5057     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5058     if (!argptr) {
5059         g_free(lurb);
5060         return -TARGET_EFAULT;
5061     }
5062     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5063     unlock_user(argptr, arg, 0);
5064 
5065     lurb->target_urb_adr = arg;
5066     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5067 
5068     /* buffer space used depends on endpoint type so lock the entire buffer */
5069     /* control type urbs should check the buffer contents for true direction */
5070     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5071     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5072         lurb->host_urb.buffer_length, 1);
5073     if (lurb->target_buf_ptr == NULL) {
5074         g_free(lurb);
5075         return -TARGET_EFAULT;
5076     }
5077 
5078     /* update buffer pointer in host copy */
5079     lurb->host_urb.buffer = lurb->target_buf_ptr;
5080 
5081     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5082     if (is_error(ret)) {
5083         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5084         g_free(lurb);
5085     } else {
5086         urb_hashtable_insert(lurb);
5087     }
5088 
5089     return ret;
5090 }
5091 #endif /* CONFIG_USBFS */
5092 
5093 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5094                             int cmd, abi_long arg)
5095 {
5096     void *argptr;
5097     struct dm_ioctl *host_dm;
5098     abi_long guest_data;
5099     uint32_t guest_data_size;
5100     int target_size;
5101     const argtype *arg_type = ie->arg_type;
5102     abi_long ret;
5103     void *big_buf = NULL;
5104     char *host_data;
5105 
5106     arg_type++;
5107     target_size = thunk_type_size(arg_type, 0);
5108     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5109     if (!argptr) {
5110         ret = -TARGET_EFAULT;
5111         goto out;
5112     }
5113     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5114     unlock_user(argptr, arg, 0);
5115 
5116     /* buf_temp is too small, so fetch things into a bigger buffer */
5117     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5118     memcpy(big_buf, buf_temp, target_size);
5119     buf_temp = big_buf;
5120     host_dm = big_buf;
5121 
5122     guest_data = arg + host_dm->data_start;
5123     if ((guest_data - arg) < 0) {
5124         ret = -TARGET_EINVAL;
5125         goto out;
5126     }
5127     guest_data_size = host_dm->data_size - host_dm->data_start;
5128     host_data = (char*)host_dm + host_dm->data_start;
5129 
5130     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5131     if (!argptr) {
5132         ret = -TARGET_EFAULT;
5133         goto out;
5134     }
5135 
5136     switch (ie->host_cmd) {
5137     case DM_REMOVE_ALL:
5138     case DM_LIST_DEVICES:
5139     case DM_DEV_CREATE:
5140     case DM_DEV_REMOVE:
5141     case DM_DEV_SUSPEND:
5142     case DM_DEV_STATUS:
5143     case DM_DEV_WAIT:
5144     case DM_TABLE_STATUS:
5145     case DM_TABLE_CLEAR:
5146     case DM_TABLE_DEPS:
5147     case DM_LIST_VERSIONS:
5148         /* no input data */
5149         break;
5150     case DM_DEV_RENAME:
5151     case DM_DEV_SET_GEOMETRY:
5152         /* data contains only strings */
5153         memcpy(host_data, argptr, guest_data_size);
5154         break;
5155     case DM_TARGET_MSG:
5156         memcpy(host_data, argptr, guest_data_size);
5157         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5158         break;
5159     case DM_TABLE_LOAD:
5160     {
5161         void *gspec = argptr;
5162         void *cur_data = host_data;
5163         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5164         int spec_size = thunk_type_size(arg_type, 0);
5165         int i;
5166 
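        /*
         * Each dm_target_spec is followed by its parameter string;
         * convert the fixed part of every spec and recompute ->next so
         * that the specs remain contiguous in the host buffer.
         */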
5167         for (i = 0; i < host_dm->target_count; i++) {
5168             struct dm_target_spec *spec = cur_data;
5169             uint32_t next;
5170             int slen;
5171 
5172             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5173             slen = strlen((char*)gspec + spec_size) + 1;
5174             next = spec->next;
5175             spec->next = sizeof(*spec) + slen;
5176             strcpy((char*)&spec[1], gspec + spec_size);
5177             gspec += next;
5178             cur_data += spec->next;
5179         }
5180         break;
5181     }
5182     default:
5183         ret = -TARGET_EINVAL;
5184         unlock_user(argptr, guest_data, 0);
5185         goto out;
5186     }
5187     unlock_user(argptr, guest_data, 0);
5188 
5189     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5190     if (!is_error(ret)) {
5191         guest_data = arg + host_dm->data_start;
5192         guest_data_size = host_dm->data_size - host_dm->data_start;
5193         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5194         switch (ie->host_cmd) {
5195         case DM_REMOVE_ALL:
5196         case DM_DEV_CREATE:
5197         case DM_DEV_REMOVE:
5198         case DM_DEV_RENAME:
5199         case DM_DEV_SUSPEND:
5200         case DM_DEV_STATUS:
5201         case DM_TABLE_LOAD:
5202         case DM_TABLE_CLEAR:
5203         case DM_TARGET_MSG:
5204         case DM_DEV_SET_GEOMETRY:
5205             /* no return data */
5206             break;
5207         case DM_LIST_DEVICES:
5208         {
5209             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5210             uint32_t remaining_data = guest_data_size;
5211             void *cur_data = argptr;
5212             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5213             int nl_size = 12; /* can't use thunk_size due to alignment */
5214 
5215             while (1) {
5216                 uint32_t next = nl->next;
5217                 if (next) {
5218                     nl->next = nl_size + (strlen(nl->name) + 1);
5219                 }
5220                 if (remaining_data < nl->next) {
5221                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5222                     break;
5223                 }
5224                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5225                 strcpy(cur_data + nl_size, nl->name);
5226                 cur_data += nl->next;
5227                 remaining_data -= nl->next;
5228                 if (!next) {
5229                     break;
5230                 }
5231                 nl = (void*)nl + next;
5232             }
5233             break;
5234         }
5235         case DM_DEV_WAIT:
5236         case DM_TABLE_STATUS:
5237         {
5238             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5239             void *cur_data = argptr;
5240             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5241             int spec_size = thunk_type_size(arg_type, 0);
5242             int i;
5243 
5244             for (i = 0; i < host_dm->target_count; i++) {
5245                 uint32_t next = spec->next;
5246                 int slen = strlen((char*)&spec[1]) + 1;
5247                 spec->next = (cur_data - argptr) + spec_size + slen;
5248                 if (guest_data_size < spec->next) {
5249                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5250                     break;
5251                 }
5252                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5253                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5254                 cur_data = argptr + spec->next;
5255                 spec = (void*)host_dm + host_dm->data_start + next;
5256             }
5257             break;
5258         }
5259         case DM_TABLE_DEPS:
5260         {
5261             void *hdata = (void*)host_dm + host_dm->data_start;
5262             int count = *(uint32_t*)hdata;
5263             uint64_t *hdev = hdata + 8;
5264             uint64_t *gdev = argptr + 8;
5265             int i;
5266 
5267             *(uint32_t*)argptr = tswap32(count);
5268             for (i = 0; i < count; i++) {
5269                 *gdev = tswap64(*hdev);
5270                 gdev++;
5271                 hdev++;
5272             }
5273             break;
5274         }
5275         case DM_LIST_VERSIONS:
5276         {
5277             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5278             uint32_t remaining_data = guest_data_size;
5279             void *cur_data = argptr;
5280             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5281             int vers_size = thunk_type_size(arg_type, 0);
5282 
5283             while (1) {
5284                 uint32_t next = vers->next;
5285                 if (next) {
5286                     vers->next = vers_size + (strlen(vers->name) + 1);
5287                 }
5288                 if (remaining_data < vers->next) {
5289                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5290                     break;
5291                 }
5292                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5293                 strcpy(cur_data + vers_size, vers->name);
5294                 cur_data += vers->next;
5295                 remaining_data -= vers->next;
5296                 if (!next) {
5297                     break;
5298                 }
5299                 vers = (void*)vers + next;
5300             }
5301             break;
5302         }
5303         default:
5304             unlock_user(argptr, guest_data, 0);
5305             ret = -TARGET_EINVAL;
5306             goto out;
5307         }
5308         unlock_user(argptr, guest_data, guest_data_size);
5309 
5310         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5311         if (!argptr) {
5312             ret = -TARGET_EFAULT;
5313             goto out;
5314         }
5315         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5316         unlock_user(argptr, arg, target_size);
5317     }
5318 out:
5319     g_free(big_buf);
5320     return ret;
5321 }
5322 
5323 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5324                                int cmd, abi_long arg)
5325 {
5326     void *argptr;
5327     int target_size;
5328     const argtype *arg_type = ie->arg_type;
5329     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5330     abi_long ret;
5331 
5332     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5333     struct blkpg_partition host_part;
5334 
5335     /* Read and convert blkpg */
5336     arg_type++;
5337     target_size = thunk_type_size(arg_type, 0);
5338     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5339     if (!argptr) {
5340         ret = -TARGET_EFAULT;
5341         goto out;
5342     }
5343     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5344     unlock_user(argptr, arg, 0);
5345 
5346     switch (host_blkpg->op) {
5347     case BLKPG_ADD_PARTITION:
5348     case BLKPG_DEL_PARTITION:
5349         /* payload is struct blkpg_partition */
5350         break;
5351     default:
5352         /* Unknown opcode */
5353         ret = -TARGET_EINVAL;
5354         goto out;
5355     }
5356 
5357     /* Read and convert blkpg->data */
5358     arg = (abi_long)(uintptr_t)host_blkpg->data;
5359     target_size = thunk_type_size(part_arg_type, 0);
5360     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5361     if (!argptr) {
5362         ret = -TARGET_EFAULT;
5363         goto out;
5364     }
5365     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5366     unlock_user(argptr, arg, 0);
5367 
5368     /* Swizzle the data pointer to our local copy and call! */
5369     host_blkpg->data = &host_part;
5370     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5371 
5372 out:
5373     return ret;
5374 }
5375 
5376 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5377                                 int fd, int cmd, abi_long arg)
5378 {
5379     const argtype *arg_type = ie->arg_type;
5380     const StructEntry *se;
5381     const argtype *field_types;
5382     const int *dst_offsets, *src_offsets;
5383     int target_size;
5384     void *argptr;
5385     abi_ulong *target_rt_dev_ptr = NULL;
5386     unsigned long *host_rt_dev_ptr = NULL;
5387     abi_long ret;
5388     int i;
5389 
5390     assert(ie->access == IOC_W);
5391     assert(*arg_type == TYPE_PTR);
5392     arg_type++;
5393     assert(*arg_type == TYPE_STRUCT);
5394     target_size = thunk_type_size(arg_type, 0);
5395     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5396     if (!argptr) {
5397         return -TARGET_EFAULT;
5398     }
5399     arg_type++;
5400     assert(*arg_type == (int)STRUCT_rtentry);
5401     se = struct_entries + *arg_type++;
5402     assert(se->convert[0] == NULL);
5403     /* convert struct here to be able to catch rt_dev string */
5404     field_types = se->field_types;
5405     dst_offsets = se->field_offsets[THUNK_HOST];
5406     src_offsets = se->field_offsets[THUNK_TARGET];
5407     for (i = 0; i < se->nb_fields; i++) {
5408         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5409             assert(*field_types == TYPE_PTRVOID);
5410             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5411             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5412             if (*target_rt_dev_ptr != 0) {
5413                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5414                                                   tswapal(*target_rt_dev_ptr));
5415                 if (!*host_rt_dev_ptr) {
5416                     unlock_user(argptr, arg, 0);
5417                     return -TARGET_EFAULT;
5418                 }
5419             } else {
5420                 *host_rt_dev_ptr = 0;
5421             }
5422             field_types++;
5423             continue;
5424         }
5425         field_types = thunk_convert(buf_temp + dst_offsets[i],
5426                                     argptr + src_offsets[i],
5427                                     field_types, THUNK_HOST);
5428     }
5429     unlock_user(argptr, arg, 0);
5430 
5431     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5432 
5433     assert(host_rt_dev_ptr != NULL);
5434     assert(target_rt_dev_ptr != NULL);
5435     if (*host_rt_dev_ptr != 0) {
5436         unlock_user((void *)*host_rt_dev_ptr,
5437                     *target_rt_dev_ptr, 0);
5438     }
5439     return ret;
5440 }
5441 
5442 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5443                                      int fd, int cmd, abi_long arg)
5444 {
5445     int sig = target_to_host_signal(arg);
5446     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5447 }
5448 
5449 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5450                                     int fd, int cmd, abi_long arg)
5451 {
5452     struct timeval tv;
5453     abi_long ret;
5454 
5455     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5456     if (is_error(ret)) {
5457         return ret;
5458     }
5459 
5460     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5461         if (copy_to_user_timeval(arg, &tv)) {
5462             return -TARGET_EFAULT;
5463         }
5464     } else {
5465         if (copy_to_user_timeval64(arg, &tv)) {
5466             return -TARGET_EFAULT;
5467         }
5468     }
5469 
5470     return ret;
5471 }
5472 
5473 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5474                                       int fd, int cmd, abi_long arg)
5475 {
5476     struct timespec ts;
5477     abi_long ret;
5478 
5479     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5480     if (is_error(ret)) {
5481         return ret;
5482     }
5483 
5484     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5485         if (host_to_target_timespec(arg, &ts)) {
5486             return -TARGET_EFAULT;
5487         }
5488     } else {
5489         if (host_to_target_timespec64(arg, &ts)) {
5490             return -TARGET_EFAULT;
5491         }
5492     }
5493 
5494     return ret;
5495 }
5496 
5497 #ifdef TIOCGPTPEER
5498 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5499                                      int fd, int cmd, abi_long arg)
5500 {
5501     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5502     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5503 }
5504 #endif
5505 
5506 #ifdef HAVE_DRM_H
5507 
5508 static void unlock_drm_version(struct drm_version *host_ver,
5509                                struct target_drm_version *target_ver,
5510                                bool copy)
5511 {
5512     unlock_user(host_ver->name, target_ver->name,
5513                                 copy ? host_ver->name_len : 0);
5514     unlock_user(host_ver->date, target_ver->date,
5515                                 copy ? host_ver->date_len : 0);
5516     unlock_user(host_ver->desc, target_ver->desc,
5517                                 copy ? host_ver->desc_len : 0);
5518 }
5519 
5520 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5521                                           struct target_drm_version *target_ver)
5522 {
5523     memset(host_ver, 0, sizeof(*host_ver));
5524 
5525     __get_user(host_ver->name_len, &target_ver->name_len);
5526     if (host_ver->name_len) {
5527         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5528                                    target_ver->name_len, 0);
5529         if (!host_ver->name) {
5530             return -EFAULT;
5531         }
5532     }
5533 
5534     __get_user(host_ver->date_len, &target_ver->date_len);
5535     if (host_ver->date_len) {
5536         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5537                                    target_ver->date_len, 0);
5538         if (!host_ver->date) {
5539             goto err;
5540         }
5541     }
5542 
5543     __get_user(host_ver->desc_len, &target_ver->desc_len);
5544     if (host_ver->desc_len) {
5545         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5546                                    target_ver->desc_len, 0);
5547         if (!host_ver->desc) {
5548             goto err;
5549         }
5550     }
5551 
5552     return 0;
5553 err:
5554     unlock_drm_version(host_ver, target_ver, false);
5555     return -EFAULT;
5556 }
5557 
5558 static inline void host_to_target_drmversion(
5559                                           struct target_drm_version *target_ver,
5560                                           struct drm_version *host_ver)
5561 {
5562     __put_user(host_ver->version_major, &target_ver->version_major);
5563     __put_user(host_ver->version_minor, &target_ver->version_minor);
5564     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5565     __put_user(host_ver->name_len, &target_ver->name_len);
5566     __put_user(host_ver->date_len, &target_ver->date_len);
5567     __put_user(host_ver->desc_len, &target_ver->desc_len);
5568     unlock_drm_version(host_ver, target_ver, true);
5569 }
5570 
5571 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5572                              int fd, int cmd, abi_long arg)
5573 {
5574     struct drm_version *ver;
5575     struct target_drm_version *target_ver;
5576     abi_long ret;
5577 
5578     switch (ie->host_cmd) {
5579     case DRM_IOCTL_VERSION:
5580         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5581             return -TARGET_EFAULT;
5582         }
5583         ver = (struct drm_version *)buf_temp;
5584         ret = target_to_host_drmversion(ver, target_ver);
5585         if (!is_error(ret)) {
5586             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5587             if (is_error(ret)) {
5588                 unlock_drm_version(ver, target_ver, false);
5589             } else {
5590                 host_to_target_drmversion(target_ver, ver);
5591             }
5592         }
5593         unlock_user_struct(target_ver, arg, 0);
5594         return ret;
5595     }
5596     return -TARGET_ENOSYS;
5597 }
5598 
5599 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5600                                            struct drm_i915_getparam *gparam,
5601                                            int fd, abi_long arg)
5602 {
5603     abi_long ret;
5604     int value;
5605     struct target_drm_i915_getparam *target_gparam;
5606 
5607     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5608         return -TARGET_EFAULT;
5609     }
5610 
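    /*
     * gparam->value must point at host memory, so aim it at a local
     * int and copy the result back to the guest with put_user_s32().
     */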
5611     __get_user(gparam->param, &target_gparam->param);
5612     gparam->value = &value;
5613     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5614     put_user_s32(value, target_gparam->value);
5615 
5616     unlock_user_struct(target_gparam, arg, 0);
5617     return ret;
5618 }
5619 
5620 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5621                                   int fd, int cmd, abi_long arg)
5622 {
5623     switch (ie->host_cmd) {
5624     case DRM_IOCTL_I915_GETPARAM:
5625         return do_ioctl_drm_i915_getparam(ie,
5626                                           (struct drm_i915_getparam *)buf_temp,
5627                                           fd, arg);
5628     default:
5629         return -TARGET_ENOSYS;
5630     }
5631 }
5632 
5633 #endif
5634 
5635 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5636                                         int fd, int cmd, abi_long arg)
5637 {
5638     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5639     struct tun_filter *target_filter;
5640     char *target_addr;
5641 
5642     assert(ie->access == IOC_W);
5643 
5644     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5645     if (!target_filter) {
5646         return -TARGET_EFAULT;
5647     }
5648     filter->flags = tswap16(target_filter->flags);
5649     filter->count = tswap16(target_filter->count);
5650     unlock_user(target_filter, arg, 0);
5651 
5652     if (filter->count) {
5653         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5654             MAX_STRUCT_SIZE) {
5655             return -TARGET_EFAULT;
5656         }
5657 
5658         target_addr = lock_user(VERIFY_READ,
5659                                 arg + offsetof(struct tun_filter, addr),
5660                                 filter->count * ETH_ALEN, 1);
5661         if (!target_addr) {
5662             return -TARGET_EFAULT;
5663         }
5664         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5665         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5666     }
5667 
5668     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5669 }
5670 
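/*
 * Table mapping each supported TARGET_* ioctl number to its host
 * number, access mode and argument description from ioctls.h.
 * IOCTL_SPECIAL entries also name a custom conversion handler, and
 * IOCTL_IGNORE entries leave host_cmd as 0 so do_ioctl() reports
 * them as unsupported.
 */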
5671 IOCTLEntry ioctl_entries[] = {
5672 #define IOCTL(cmd, access, ...) \
5673     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5674 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5675     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5676 #define IOCTL_IGNORE(cmd) \
5677     { TARGET_ ## cmd, 0, #cmd },
5678 #include "ioctls.h"
5679     { 0, 0, },
5680 };
5681 
5682 /* ??? Implement proper locking for ioctls.  */
5683 /* do_ioctl() must return target values and target errnos. */
5684 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5685 {
5686     const IOCTLEntry *ie;
5687     const argtype *arg_type;
5688     abi_long ret;
5689     uint8_t buf_temp[MAX_STRUCT_SIZE];
5690     int target_size;
5691     void *argptr;
5692 
5693     ie = ioctl_entries;
5694     for (;;) {
5695         if (ie->target_cmd == 0) {
5696             qemu_log_mask(
5697                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5698             return -TARGET_ENOSYS;
5699         }
5700         if (ie->target_cmd == cmd)
5701             break;
5702         ie++;
5703     }
5704     arg_type = ie->arg_type;
5705     if (ie->do_ioctl) {
5706         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5707     } else if (!ie->host_cmd) {
5708         /* Some architectures define BSD ioctls in their headers
5709            that are not implemented in Linux.  */
5710         return -TARGET_ENOSYS;
5711     }
5712 
5713     switch (arg_type[0]) {
5714     case TYPE_NULL:
5715         /* no argument */
5716         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5717         break;
5718     case TYPE_PTRVOID:
5719     case TYPE_INT:
5720     case TYPE_LONG:
5721     case TYPE_ULONG:
5722         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5723         break;
5724     case TYPE_PTR:
5725         arg_type++;
5726         target_size = thunk_type_size(arg_type, 0);
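        /*
         * Generic pointer argument: for IOC_W and IOC_RW convert the
         * guest structure to host layout before the ioctl, and for
         * IOC_R and IOC_RW convert the result back afterwards.
         */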
5727         switch (ie->access) {
5728         case IOC_R:
5729             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5730             if (!is_error(ret)) {
5731                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5732                 if (!argptr)
5733                     return -TARGET_EFAULT;
5734                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5735                 unlock_user(argptr, arg, target_size);
5736             }
5737             break;
5738         case IOC_W:
5739             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5740             if (!argptr)
5741                 return -TARGET_EFAULT;
5742             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5743             unlock_user(argptr, arg, 0);
5744             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5745             break;
5746         default:
5747         case IOC_RW:
5748             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5749             if (!argptr)
5750                 return -TARGET_EFAULT;
5751             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5752             unlock_user(argptr, arg, 0);
5753             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5754             if (!is_error(ret)) {
5755                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5756                 if (!argptr)
5757                     return -TARGET_EFAULT;
5758                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5759                 unlock_user(argptr, arg, target_size);
5760             }
5761             break;
5762         }
5763         break;
5764     default:
5765         qemu_log_mask(LOG_UNIMP,
5766                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5767                       (long)cmd, arg_type[0]);
5768         ret = -TARGET_ENOSYS;
5769         break;
5770     }
5771     return ret;
5772 }
5773 
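/*
 * Each entry below translates one terminal flag: bits selected by the
 * target-side mask are mapped to the corresponding host-side bits and
 * vice versa by target_to_host_bitmask()/host_to_target_bitmask().
 */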
5774 static const bitmask_transtbl iflag_tbl[] = {
5775         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5776         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5777         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5778         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5779         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5780         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5781         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5782         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5783         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5784         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5785         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5786         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5787         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5788         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5789         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5790         { 0, 0, 0, 0 }
5791 };
5792 
5793 static const bitmask_transtbl oflag_tbl[] = {
5794         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5795         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5796         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5797         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5798         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5799         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5800         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5801         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5802         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5803         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5804         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5805         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5806         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5807         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5808         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5809         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5810         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5811         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5812         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5813         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5814         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5815         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5816         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5817         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5818         { 0, 0, 0, 0 }
5819 };
5820 
5821 static const bitmask_transtbl cflag_tbl[] = {
5822         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5823         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5824         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5825         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5826         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5827         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5828         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5829         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5830         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5831         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5832         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5833         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5834         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5835         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5836         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5837         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5838         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5839         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5840         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5841         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5842         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5843         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5844         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5845         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5846         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5847         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5848         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5849         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5850         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5851         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5852         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5853         { 0, 0, 0, 0 }
5854 };
5855 
5856 static const bitmask_transtbl lflag_tbl[] = {
5857   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5858   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5859   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5860   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5861   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5862   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5863   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5864   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5865   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5866   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5867   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5868   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5869   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5870   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5871   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5872   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5873   { 0, 0, 0, 0 }
5874 };
5875 
5876 static void target_to_host_termios (void *dst, const void *src)
5877 {
5878     struct host_termios *host = dst;
5879     const struct target_termios *target = src;
5880 
5881     host->c_iflag =
5882         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5883     host->c_oflag =
5884         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5885     host->c_cflag =
5886         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5887     host->c_lflag =
5888         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5889     host->c_line = target->c_line;
5890 
5891     memset(host->c_cc, 0, sizeof(host->c_cc));
5892     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5893     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5894     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5895     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5896     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5897     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5898     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5899     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5900     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5901     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5902     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5903     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5904     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5905     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5906     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5907     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5908     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5909 }
5910 
5911 static void host_to_target_termios (void *dst, const void *src)
5912 {
5913     struct target_termios *target = dst;
5914     const struct host_termios *host = src;
5915 
5916     target->c_iflag =
5917         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5918     target->c_oflag =
5919         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5920     target->c_cflag =
5921         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5922     target->c_lflag =
5923         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5924     target->c_line = host->c_line;
5925 
5926     memset(target->c_cc, 0, sizeof(target->c_cc));
5927     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5928     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5929     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5930     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5931     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5932     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5933     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5934     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5935     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5936     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5937     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5938     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5939     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5940     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5941     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5942     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5943     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5944 }
5945 
5946 static const StructEntry struct_termios_def = {
5947     .convert = { host_to_target_termios, target_to_host_termios },
5948     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5949     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5950     .print = print_termios,
5951 };
5952 
5953 static const bitmask_transtbl mmap_flags_tbl[] = {
5954     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5955     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5956     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5957     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5958       MAP_ANONYMOUS, MAP_ANONYMOUS },
5959     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5960       MAP_GROWSDOWN, MAP_GROWSDOWN },
5961     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5962       MAP_DENYWRITE, MAP_DENYWRITE },
5963     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5964       MAP_EXECUTABLE, MAP_EXECUTABLE },
5965     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5966     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5967       MAP_NORESERVE, MAP_NORESERVE },
5968     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5969     /* MAP_STACK had been ignored by the kernel for quite some time.
5970        Recognize it for the target insofar as we do not want to pass
5971        it through to the host.  */
5972     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5973     { 0, 0, 0, 0 }
5974 };
5975 
5976 /*
5977  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5978  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5979  */
5980 #if defined(TARGET_I386)
5981 
5982 /* NOTE: there is really one LDT for all the threads */
5983 static uint8_t *ldt_table;
5984 
5985 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5986 {
5987     int size;
5988     void *p;
5989 
5990     if (!ldt_table)
5991         return 0;
5992     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5993     if (size > bytecount)
5994         size = bytecount;
5995     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5996     if (!p)
5997         return -TARGET_EFAULT;
5998     /* ??? Should this be byteswapped?  */
5999     memcpy(p, ldt_table, size);
6000     unlock_user(p, ptr, size);
6001     return size;
6002 }
6003 
6004 /* XXX: add locking support */
6005 static abi_long write_ldt(CPUX86State *env,
6006                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6007 {
6008     struct target_modify_ldt_ldt_s ldt_info;
6009     struct target_modify_ldt_ldt_s *target_ldt_info;
6010     int seg_32bit, contents, read_exec_only, limit_in_pages;
6011     int seg_not_present, useable, lm;
6012     uint32_t *lp, entry_1, entry_2;
6013 
6014     if (bytecount != sizeof(ldt_info))
6015         return -TARGET_EINVAL;
6016     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6017         return -TARGET_EFAULT;
6018     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6019     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6020     ldt_info.limit = tswap32(target_ldt_info->limit);
6021     ldt_info.flags = tswap32(target_ldt_info->flags);
6022     unlock_user_struct(target_ldt_info, ptr, 0);
6023 
6024     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6025         return -TARGET_EINVAL;
6026     seg_32bit = ldt_info.flags & 1;
6027     contents = (ldt_info.flags >> 1) & 3;
6028     read_exec_only = (ldt_info.flags >> 3) & 1;
6029     limit_in_pages = (ldt_info.flags >> 4) & 1;
6030     seg_not_present = (ldt_info.flags >> 5) & 1;
6031     useable = (ldt_info.flags >> 6) & 1;
6032 #ifdef TARGET_ABI32
6033     lm = 0;
6034 #else
6035     lm = (ldt_info.flags >> 7) & 1;
6036 #endif
6037     if (contents == 3) {
6038         if (oldmode)
6039             return -TARGET_EINVAL;
6040         if (seg_not_present == 0)
6041             return -TARGET_EINVAL;
6042     }
6043     /* allocate the LDT */
6044     if (!ldt_table) {
6045         env->ldt.base = target_mmap(0,
6046                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6047                                     PROT_READ|PROT_WRITE,
6048                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6049         if (env->ldt.base == -1)
6050             return -TARGET_ENOMEM;
6051         memset(g2h_untagged(env->ldt.base), 0,
6052                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6053         env->ldt.limit = 0xffff;
6054         ldt_table = g2h_untagged(env->ldt.base);
6055     }
6056 
6057     /* NOTE: same code as Linux kernel */
6058     /* Allow LDTs to be cleared by the user. */
6059     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6060         if (oldmode ||
6061             (contents == 0             &&
6062              read_exec_only == 1       &&
6063              seg_32bit == 0            &&
6064              limit_in_pages == 0       &&
6065              seg_not_present == 1      &&
6066              useable == 0 )) {
6067             entry_1 = 0;
6068             entry_2 = 0;
6069             goto install;
6070         }
6071     }
6072 
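    /*
     * Pack the fields into the two 32-bit words of an x86 segment
     * descriptor: base and limit are split across both words, and the
     * constant 0x7000 supplies DPL=3 plus the S (code/data) bit, while
     * the present bit comes from seg_not_present.
     */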
6073     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6074         (ldt_info.limit & 0x0ffff);
6075     entry_2 = (ldt_info.base_addr & 0xff000000) |
6076         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6077         (ldt_info.limit & 0xf0000) |
6078         ((read_exec_only ^ 1) << 9) |
6079         (contents << 10) |
6080         ((seg_not_present ^ 1) << 15) |
6081         (seg_32bit << 22) |
6082         (limit_in_pages << 23) |
6083         (lm << 21) |
6084         0x7000;
6085     if (!oldmode)
6086         entry_2 |= (useable << 20);
6087 
6088     /* Install the new entry ...  */
6089 install:
6090     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6091     lp[0] = tswap32(entry_1);
6092     lp[1] = tswap32(entry_2);
6093     return 0;
6094 }
6095 
6096 /* specific and weird i386 syscalls */
6097 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6098                               unsigned long bytecount)
6099 {
6100     abi_long ret;
6101 
6102     switch (func) {
6103     case 0:
6104         ret = read_ldt(ptr, bytecount);
6105         break;
6106     case 1:
6107         ret = write_ldt(env, ptr, bytecount, 1);
6108         break;
6109     case 0x11:
6110         ret = write_ldt(env, ptr, bytecount, 0);
6111         break;
6112     default:
6113         ret = -TARGET_ENOSYS;
6114         break;
6115     }
6116     return ret;
6117 }
6118 
6119 #if defined(TARGET_ABI32)
6120 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6121 {
6122     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6123     struct target_modify_ldt_ldt_s ldt_info;
6124     struct target_modify_ldt_ldt_s *target_ldt_info;
6125     int seg_32bit, contents, read_exec_only, limit_in_pages;
6126     int seg_not_present, useable, lm;
6127     uint32_t *lp, entry_1, entry_2;
6128     int i;
6129 
6130     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6131     if (!target_ldt_info)
6132         return -TARGET_EFAULT;
6133     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6134     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6135     ldt_info.limit = tswap32(target_ldt_info->limit);
6136     ldt_info.flags = tswap32(target_ldt_info->flags);
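    /* An entry_number of -1 asks us to pick a free TLS slot in the GDT
       and report the chosen index back to the guest. */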
6137     if (ldt_info.entry_number == -1) {
6138         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6139             if (gdt_table[i] == 0) {
6140                 ldt_info.entry_number = i;
6141                 target_ldt_info->entry_number = tswap32(i);
6142                 break;
6143             }
6144         }
6145     }
6146     unlock_user_struct(target_ldt_info, ptr, 1);
6147 
6148     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6149         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6150            return -TARGET_EINVAL;
6151     seg_32bit = ldt_info.flags & 1;
6152     contents = (ldt_info.flags >> 1) & 3;
6153     read_exec_only = (ldt_info.flags >> 3) & 1;
6154     limit_in_pages = (ldt_info.flags >> 4) & 1;
6155     seg_not_present = (ldt_info.flags >> 5) & 1;
6156     useable = (ldt_info.flags >> 6) & 1;
6157 #ifdef TARGET_ABI32
6158     lm = 0;
6159 #else
6160     lm = (ldt_info.flags >> 7) & 1;
6161 #endif
6162 
6163     if (contents == 3) {
6164         if (seg_not_present == 0)
6165             return -TARGET_EINVAL;
6166     }
6167 
6168     /* NOTE: same code as Linux kernel */
6169     /* Allow LDTs to be cleared by the user. */
6170     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6171         if ((contents == 0             &&
6172              read_exec_only == 1       &&
6173              seg_32bit == 0            &&
6174              limit_in_pages == 0       &&
6175              seg_not_present == 1      &&
6176              useable == 0 )) {
6177             entry_1 = 0;
6178             entry_2 = 0;
6179             goto install;
6180         }
6181     }
6182 
6183     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6184         (ldt_info.limit & 0x0ffff);
6185     entry_2 = (ldt_info.base_addr & 0xff000000) |
6186         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6187         (ldt_info.limit & 0xf0000) |
6188         ((read_exec_only ^ 1) << 9) |
6189         (contents << 10) |
6190         ((seg_not_present ^ 1) << 15) |
6191         (seg_32bit << 22) |
6192         (limit_in_pages << 23) |
6193         (useable << 20) |
6194         (lm << 21) |
6195         0x7000;
6196 
6197     /* Install the new entry ...  */
6198 install:
6199     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6200     lp[0] = tswap32(entry_1);
6201     lp[1] = tswap32(entry_2);
6202     return 0;
6203 }
6204 
6205 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6206 {
6207     struct target_modify_ldt_ldt_s *target_ldt_info;
6208     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6209     uint32_t base_addr, limit, flags;
6210     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6211     int seg_not_present, useable, lm;
6212     uint32_t *lp, entry_1, entry_2;
6213 
6214     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6215     if (!target_ldt_info)
6216         return -TARGET_EFAULT;
6217     idx = tswap32(target_ldt_info->entry_number);
6218     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6219         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6220         unlock_user_struct(target_ldt_info, ptr, 1);
6221         return -TARGET_EINVAL;
6222     }
6223     lp = (uint32_t *)(gdt_table + idx);
6224     entry_1 = tswap32(lp[0]);
6225     entry_2 = tswap32(lp[1]);
6226 
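    /* Decode the descriptor words back into the modify_ldt-style flag
       layout that the guest expects. */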
6227     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6228     contents = (entry_2 >> 10) & 3;
6229     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6230     seg_32bit = (entry_2 >> 22) & 1;
6231     limit_in_pages = (entry_2 >> 23) & 1;
6232     useable = (entry_2 >> 20) & 1;
6233 #ifdef TARGET_ABI32
6234     lm = 0;
6235 #else
6236     lm = (entry_2 >> 21) & 1;
6237 #endif
6238     flags = (seg_32bit << 0) | (contents << 1) |
6239         (read_exec_only << 3) | (limit_in_pages << 4) |
6240         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6241     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6242     base_addr = (entry_1 >> 16) |
6243         (entry_2 & 0xff000000) |
6244         ((entry_2 & 0xff) << 16);
6245     target_ldt_info->base_addr = tswapal(base_addr);
6246     target_ldt_info->limit = tswap32(limit);
6247     target_ldt_info->flags = tswap32(flags);
6248     unlock_user_struct(target_ldt_info, ptr, 1);
6249     return 0;
6250 }
6251 
6252 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6253 {
6254     return -TARGET_ENOSYS;
6255 }
6256 #else
6257 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6258 {
6259     abi_long ret = 0;
6260     abi_ulong val;
6261     int idx;
6262 
6263     switch(code) {
6264     case TARGET_ARCH_SET_GS:
6265     case TARGET_ARCH_SET_FS:
6266         if (code == TARGET_ARCH_SET_GS)
6267             idx = R_GS;
6268         else
6269             idx = R_FS;
6270         cpu_x86_load_seg(env, idx, 0);
6271         env->segs[idx].base = addr;
6272         break;
6273     case TARGET_ARCH_GET_GS:
6274     case TARGET_ARCH_GET_FS:
6275         if (code == TARGET_ARCH_GET_GS)
6276             idx = R_GS;
6277         else
6278             idx = R_FS;
6279         val = env->segs[idx].base;
6280         if (put_user(val, addr, abi_ulong))
6281             ret = -TARGET_EFAULT;
6282         break;
6283     default:
6284         ret = -TARGET_EINVAL;
6285         break;
6286     }
6287     return ret;
6288 }
6289 #endif /* defined(TARGET_ABI32) */
6290 
6291 #endif /* defined(TARGET_I386) */
6292 
6293 #define NEW_STACK_SIZE 0x40000
6294 
6295 
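/*
 * clone_lock serializes thread creation: the parent holds it while it sets
 * up the new CPU state, and clone_func() takes and releases it before
 * entering cpu_loop(), so the child cannot start running guest code until
 * the parent has finished initializing the TLS state.
 */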
6296 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6297 typedef struct {
6298     CPUArchState *env;
6299     pthread_mutex_t mutex;
6300     pthread_cond_t cond;
6301     pthread_t thread;
6302     uint32_t tid;
6303     abi_ulong child_tidptr;
6304     abi_ulong parent_tidptr;
6305     sigset_t sigmask;
6306 } new_thread_info;
6307 
6308 static void *clone_func(void *arg)
6309 {
6310     new_thread_info *info = arg;
6311     CPUArchState *env;
6312     CPUState *cpu;
6313     TaskState *ts;
6314 
6315     rcu_register_thread();
6316     tcg_register_thread();
6317     env = info->env;
6318     cpu = env_cpu(env);
6319     thread_cpu = cpu;
6320     ts = (TaskState *)cpu->opaque;
6321     info->tid = sys_gettid();
6322     task_settid(ts);
6323     if (info->child_tidptr)
6324         put_user_u32(info->tid, info->child_tidptr);
6325     if (info->parent_tidptr)
6326         put_user_u32(info->tid, info->parent_tidptr);
6327     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6328     /* Enable signals.  */
6329     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6330     /* Signal to the parent that we're ready.  */
6331     pthread_mutex_lock(&info->mutex);
6332     pthread_cond_broadcast(&info->cond);
6333     pthread_mutex_unlock(&info->mutex);
6334     /* Wait until the parent has finished initializing the tls state.  */
6335     pthread_mutex_lock(&clone_lock);
6336     pthread_mutex_unlock(&clone_lock);
6337     cpu_loop(env);
6338     /* never exits */
6339     return NULL;
6340 }
6341 
6342 /* do_fork() must return host values and target errnos (unlike most
6343    other do_*() functions). */
6344 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6345                    abi_ulong parent_tidptr, target_ulong newtls,
6346                    abi_ulong child_tidptr)
6347 {
6348     CPUState *cpu = env_cpu(env);
6349     int ret;
6350     TaskState *ts;
6351     CPUState *new_cpu;
6352     CPUArchState *new_env;
6353     sigset_t sigmask;
6354 
6355     flags &= ~CLONE_IGNORED_FLAGS;
6356 
6357     /* Emulate vfork() with fork() */
6358     if (flags & CLONE_VFORK)
6359         flags &= ~(CLONE_VFORK | CLONE_VM);
6360 
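    /* CLONE_VM means the child shares the address space, so emulate it with
       a host pthread running a copy of the CPU state; otherwise fall back to
       a real host fork() below. */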
6361     if (flags & CLONE_VM) {
6362         TaskState *parent_ts = (TaskState *)cpu->opaque;
6363         new_thread_info info;
6364         pthread_attr_t attr;
6365 
6366         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6367             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6368             return -TARGET_EINVAL;
6369         }
6370 
6371         ts = g_new0(TaskState, 1);
6372         init_task_state(ts);
6373 
6374         /* Grab a mutex so that thread setup appears atomic.  */
6375         pthread_mutex_lock(&clone_lock);
6376 
6377         /*
6378          * If this is our first additional thread, we need to ensure we
6379          * generate code for parallel execution and flush old translations.
6380          * Do this now so that the copy gets CF_PARALLEL too.
6381          */
6382         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6383             cpu->tcg_cflags |= CF_PARALLEL;
6384             tb_flush(cpu);
6385         }
6386 
6387         /* we create a new CPU instance. */
6388         new_env = cpu_copy(env);
6389         /* Init regs that differ from the parent.  */
6390         cpu_clone_regs_child(new_env, newsp, flags);
6391         cpu_clone_regs_parent(env, flags);
6392         new_cpu = env_cpu(new_env);
6393         new_cpu->opaque = ts;
6394         ts->bprm = parent_ts->bprm;
6395         ts->info = parent_ts->info;
6396         ts->signal_mask = parent_ts->signal_mask;
6397 
6398         if (flags & CLONE_CHILD_CLEARTID) {
6399             ts->child_tidptr = child_tidptr;
6400         }
6401 
6402         if (flags & CLONE_SETTLS) {
6403             cpu_set_tls (new_env, newtls);
6404         }
6405 
6406         memset(&info, 0, sizeof(info));
6407         pthread_mutex_init(&info.mutex, NULL);
6408         pthread_mutex_lock(&info.mutex);
6409         pthread_cond_init(&info.cond, NULL);
6410         info.env = new_env;
6411         if (flags & CLONE_CHILD_SETTID) {
6412             info.child_tidptr = child_tidptr;
6413         }
6414         if (flags & CLONE_PARENT_SETTID) {
6415             info.parent_tidptr = parent_tidptr;
6416         }
6417 
6418         ret = pthread_attr_init(&attr);
6419         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6420         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6421         /* It is not safe to deliver signals until the child has finished
6422            initializing, so temporarily block all signals.  */
6423         sigfillset(&sigmask);
6424         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6425         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6426 
6427         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6428         /* TODO: Free new CPU state if thread creation failed.  */
6429 
6430         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6431         pthread_attr_destroy(&attr);
6432         if (ret == 0) {
6433             /* Wait for the child to initialize.  */
6434             pthread_cond_wait(&info.cond, &info.mutex);
6435             ret = info.tid;
6436         } else {
6437             ret = -1;
6438         }
6439         pthread_mutex_unlock(&info.mutex);
6440         pthread_cond_destroy(&info.cond);
6441         pthread_mutex_destroy(&info.mutex);
6442         pthread_mutex_unlock(&clone_lock);
6443     } else {
6444         /* Without CLONE_VM we treat the request as a plain fork. */
6445         if (flags & CLONE_INVALID_FORK_FLAGS) {
6446             return -TARGET_EINVAL;
6447         }
6448 
6449         /* We can't support custom termination signals */
6450         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6451             return -TARGET_EINVAL;
6452         }
6453 
6454         if (block_signals()) {
6455             return -TARGET_ERESTARTSYS;
6456         }
6457 
6458         fork_start();
6459         ret = fork();
6460         if (ret == 0) {
6461             /* Child Process.  */
6462             cpu_clone_regs_child(env, newsp, flags);
6463             fork_end(1);
6464             /* There is a race condition here.  The parent process could
6465                theoretically read the TID in the child process before the child
6466                tid is set.  Avoiding it would require either ptrace
6467                (not implemented) or having *_tidptr point at a shared memory
6468                mapping.  We can't repeat the spinlock hack used above because
6469                the child process gets its own copy of the lock.  */
6470             if (flags & CLONE_CHILD_SETTID)
6471                 put_user_u32(sys_gettid(), child_tidptr);
6472             if (flags & CLONE_PARENT_SETTID)
6473                 put_user_u32(sys_gettid(), parent_tidptr);
6474             ts = (TaskState *)cpu->opaque;
6475             if (flags & CLONE_SETTLS)
6476                 cpu_set_tls (env, newtls);
6477             if (flags & CLONE_CHILD_CLEARTID)
6478                 ts->child_tidptr = child_tidptr;
6479         } else {
6480             cpu_clone_regs_parent(env, flags);
6481             fork_end(0);
6482         }
6483     }
6484     return ret;
6485 }
6486 
6487 /* Warning: does not handle Linux-specific flags. */
6488 static int target_to_host_fcntl_cmd(int cmd)
6489 {
6490     int ret;
6491 
6492     switch(cmd) {
6493     case TARGET_F_DUPFD:
6494     case TARGET_F_GETFD:
6495     case TARGET_F_SETFD:
6496     case TARGET_F_GETFL:
6497     case TARGET_F_SETFL:
6498     case TARGET_F_OFD_GETLK:
6499     case TARGET_F_OFD_SETLK:
6500     case TARGET_F_OFD_SETLKW:
6501         ret = cmd;
6502         break;
6503     case TARGET_F_GETLK:
6504         ret = F_GETLK64;
6505         break;
6506     case TARGET_F_SETLK:
6507         ret = F_SETLK64;
6508         break;
6509     case TARGET_F_SETLKW:
6510         ret = F_SETLKW64;
6511         break;
6512     case TARGET_F_GETOWN:
6513         ret = F_GETOWN;
6514         break;
6515     case TARGET_F_SETOWN:
6516         ret = F_SETOWN;
6517         break;
6518     case TARGET_F_GETSIG:
6519         ret = F_GETSIG;
6520         break;
6521     case TARGET_F_SETSIG:
6522         ret = F_SETSIG;
6523         break;
6524 #if TARGET_ABI_BITS == 32
6525     case TARGET_F_GETLK64:
6526         ret = F_GETLK64;
6527         break;
6528     case TARGET_F_SETLK64:
6529         ret = F_SETLK64;
6530         break;
6531     case TARGET_F_SETLKW64:
6532         ret = F_SETLKW64;
6533         break;
6534 #endif
6535     case TARGET_F_SETLEASE:
6536         ret = F_SETLEASE;
6537         break;
6538     case TARGET_F_GETLEASE:
6539         ret = F_GETLEASE;
6540         break;
6541 #ifdef F_DUPFD_CLOEXEC
6542     case TARGET_F_DUPFD_CLOEXEC:
6543         ret = F_DUPFD_CLOEXEC;
6544         break;
6545 #endif
6546     case TARGET_F_NOTIFY:
6547         ret = F_NOTIFY;
6548         break;
6549 #ifdef F_GETOWN_EX
6550     case TARGET_F_GETOWN_EX:
6551         ret = F_GETOWN_EX;
6552         break;
6553 #endif
6554 #ifdef F_SETOWN_EX
6555     case TARGET_F_SETOWN_EX:
6556         ret = F_SETOWN_EX;
6557         break;
6558 #endif
6559 #ifdef F_SETPIPE_SZ
6560     case TARGET_F_SETPIPE_SZ:
6561         ret = F_SETPIPE_SZ;
6562         break;
6563     case TARGET_F_GETPIPE_SZ:
6564         ret = F_GETPIPE_SZ;
6565         break;
6566 #endif
6567 #ifdef F_ADD_SEALS
6568     case TARGET_F_ADD_SEALS:
6569         ret = F_ADD_SEALS;
6570         break;
6571     case TARGET_F_GET_SEALS:
6572         ret = F_GET_SEALS;
6573         break;
6574 #endif
6575     default:
6576         ret = -TARGET_EINVAL;
6577         break;
6578     }
6579 
6580 #if defined(__powerpc64__)
6581     /* On PPC64 the glibc headers define the F_*LK* commands as 12, 13 and
6582      * 14, which the kernel does not support. The glibc fcntl wrapper
6583      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6584      * the syscall directly, adjust them to the values the kernel expects.
6585      */
6586     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6587         ret -= F_GETLK64 - 5;
6588     }
6589 #endif
6590 
6591     return ret;
6592 }
6593 
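/*
 * FLOCK_TRANSTBL lists the lock types once; each converter below defines
 * TRANSTBL_CONVERT to expand the list into the "case ...: return ..." arms
 * it needs for its direction of conversion.
 */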
6594 #define FLOCK_TRANSTBL \
6595     switch (type) { \
6596     TRANSTBL_CONVERT(F_RDLCK); \
6597     TRANSTBL_CONVERT(F_WRLCK); \
6598     TRANSTBL_CONVERT(F_UNLCK); \
6599     }
6600 
6601 static int target_to_host_flock(int type)
6602 {
6603 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6604     FLOCK_TRANSTBL
6605 #undef  TRANSTBL_CONVERT
6606     return -TARGET_EINVAL;
6607 }
6608 
6609 static int host_to_target_flock(int type)
6610 {
6611 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6612     FLOCK_TRANSTBL
6613 #undef  TRANSTBL_CONVERT
6614     /* If we don't know how to convert a value coming from the host,
6615      * copy it to the target field as-is.
6616      */
6617     return type;
6618 }
6619 
6620 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6621                                             abi_ulong target_flock_addr)
6622 {
6623     struct target_flock *target_fl;
6624     int l_type;
6625 
6626     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6627         return -TARGET_EFAULT;
6628     }
6629 
6630     __get_user(l_type, &target_fl->l_type);
6631     l_type = target_to_host_flock(l_type);
6632     if (l_type < 0) {
6633         return l_type;
6634     }
6635     fl->l_type = l_type;
6636     __get_user(fl->l_whence, &target_fl->l_whence);
6637     __get_user(fl->l_start, &target_fl->l_start);
6638     __get_user(fl->l_len, &target_fl->l_len);
6639     __get_user(fl->l_pid, &target_fl->l_pid);
6640     unlock_user_struct(target_fl, target_flock_addr, 0);
6641     return 0;
6642 }
6643 
6644 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6645                                           const struct flock64 *fl)
6646 {
6647     struct target_flock *target_fl;
6648     short l_type;
6649 
6650     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6651         return -TARGET_EFAULT;
6652     }
6653 
6654     l_type = host_to_target_flock(fl->l_type);
6655     __put_user(l_type, &target_fl->l_type);
6656     __put_user(fl->l_whence, &target_fl->l_whence);
6657     __put_user(fl->l_start, &target_fl->l_start);
6658     __put_user(fl->l_len, &target_fl->l_len);
6659     __put_user(fl->l_pid, &target_fl->l_pid);
6660     unlock_user_struct(target_fl, target_flock_addr, 1);
6661     return 0;
6662 }
6663 
6664 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6665 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6666 
6667 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6668 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6669                                                    abi_ulong target_flock_addr)
6670 {
6671     struct target_oabi_flock64 *target_fl;
6672     int l_type;
6673 
6674     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6675         return -TARGET_EFAULT;
6676     }
6677 
6678     __get_user(l_type, &target_fl->l_type);
6679     l_type = target_to_host_flock(l_type);
6680     if (l_type < 0) {
6681         return l_type;
6682     }
6683     fl->l_type = l_type;
6684     __get_user(fl->l_whence, &target_fl->l_whence);
6685     __get_user(fl->l_start, &target_fl->l_start);
6686     __get_user(fl->l_len, &target_fl->l_len);
6687     __get_user(fl->l_pid, &target_fl->l_pid);
6688     unlock_user_struct(target_fl, target_flock_addr, 0);
6689     return 0;
6690 }
6691 
6692 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6693                                                  const struct flock64 *fl)
6694 {
6695     struct target_oabi_flock64 *target_fl;
6696     short l_type;
6697 
6698     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6699         return -TARGET_EFAULT;
6700     }
6701 
6702     l_type = host_to_target_flock(fl->l_type);
6703     __put_user(l_type, &target_fl->l_type);
6704     __put_user(fl->l_whence, &target_fl->l_whence);
6705     __put_user(fl->l_start, &target_fl->l_start);
6706     __put_user(fl->l_len, &target_fl->l_len);
6707     __put_user(fl->l_pid, &target_fl->l_pid);
6708     unlock_user_struct(target_fl, target_flock_addr, 1);
6709     return 0;
6710 }
6711 #endif
6712 
6713 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6714                                               abi_ulong target_flock_addr)
6715 {
6716     struct target_flock64 *target_fl;
6717     int l_type;
6718 
6719     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6720         return -TARGET_EFAULT;
6721     }
6722 
6723     __get_user(l_type, &target_fl->l_type);
6724     l_type = target_to_host_flock(l_type);
6725     if (l_type < 0) {
6726         return l_type;
6727     }
6728     fl->l_type = l_type;
6729     __get_user(fl->l_whence, &target_fl->l_whence);
6730     __get_user(fl->l_start, &target_fl->l_start);
6731     __get_user(fl->l_len, &target_fl->l_len);
6732     __get_user(fl->l_pid, &target_fl->l_pid);
6733     unlock_user_struct(target_fl, target_flock_addr, 0);
6734     return 0;
6735 }
6736 
6737 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6738                                             const struct flock64 *fl)
6739 {
6740     struct target_flock64 *target_fl;
6741     short l_type;
6742 
6743     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6744         return -TARGET_EFAULT;
6745     }
6746 
6747     l_type = host_to_target_flock(fl->l_type);
6748     __put_user(l_type, &target_fl->l_type);
6749     __put_user(fl->l_whence, &target_fl->l_whence);
6750     __put_user(fl->l_start, &target_fl->l_start);
6751     __put_user(fl->l_len, &target_fl->l_len);
6752     __put_user(fl->l_pid, &target_fl->l_pid);
6753     unlock_user_struct(target_fl, target_flock_addr, 1);
6754     return 0;
6755 }
6756 
6757 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6758 {
6759     struct flock64 fl64;
6760 #ifdef F_GETOWN_EX
6761     struct f_owner_ex fox;
6762     struct target_f_owner_ex *target_fox;
6763 #endif
6764     abi_long ret;
6765     int host_cmd = target_to_host_fcntl_cmd(cmd);
6766 
6767     if (host_cmd == -TARGET_EINVAL)
6768 	    return host_cmd;
6769 
6770     switch(cmd) {
6771     case TARGET_F_GETLK:
6772         ret = copy_from_user_flock(&fl64, arg);
6773         if (ret) {
6774             return ret;
6775         }
6776         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6777         if (ret == 0) {
6778             ret = copy_to_user_flock(arg, &fl64);
6779         }
6780         break;
6781 
6782     case TARGET_F_SETLK:
6783     case TARGET_F_SETLKW:
6784         ret = copy_from_user_flock(&fl64, arg);
6785         if (ret) {
6786             return ret;
6787         }
6788         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6789         break;
6790 
6791     case TARGET_F_GETLK64:
6792     case TARGET_F_OFD_GETLK:
6793         ret = copy_from_user_flock64(&fl64, arg);
6794         if (ret) {
6795             return ret;
6796         }
6797         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6798         if (ret == 0) {
6799             ret = copy_to_user_flock64(arg, &fl64);
6800         }
6801         break;
6802     case TARGET_F_SETLK64:
6803     case TARGET_F_SETLKW64:
6804     case TARGET_F_OFD_SETLK:
6805     case TARGET_F_OFD_SETLKW:
6806         ret = copy_from_user_flock64(&fl64, arg);
6807         if (ret) {
6808             return ret;
6809         }
6810         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6811         break;
6812 
6813     case TARGET_F_GETFL:
6814         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6815         if (ret >= 0) {
6816             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6817         }
6818         break;
6819 
6820     case TARGET_F_SETFL:
6821         ret = get_errno(safe_fcntl(fd, host_cmd,
6822                                    target_to_host_bitmask(arg,
6823                                                           fcntl_flags_tbl)));
6824         break;
6825 
6826 #ifdef F_GETOWN_EX
6827     case TARGET_F_GETOWN_EX:
6828         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6829         if (ret >= 0) {
6830             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6831                 return -TARGET_EFAULT;
6832             target_fox->type = tswap32(fox.type);
6833             target_fox->pid = tswap32(fox.pid);
6834             unlock_user_struct(target_fox, arg, 1);
6835         }
6836         break;
6837 #endif
6838 
6839 #ifdef F_SETOWN_EX
6840     case TARGET_F_SETOWN_EX:
6841         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6842             return -TARGET_EFAULT;
6843         fox.type = tswap32(target_fox->type);
6844         fox.pid = tswap32(target_fox->pid);
6845         unlock_user_struct(target_fox, arg, 0);
6846         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6847         break;
6848 #endif
6849 
6850     case TARGET_F_SETSIG:
6851         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6852         break;
6853 
6854     case TARGET_F_GETSIG:
6855         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6856         break;
6857 
6858     case TARGET_F_SETOWN:
6859     case TARGET_F_GETOWN:
6860     case TARGET_F_SETLEASE:
6861     case TARGET_F_GETLEASE:
6862     case TARGET_F_SETPIPE_SZ:
6863     case TARGET_F_GETPIPE_SZ:
6864     case TARGET_F_ADD_SEALS:
6865     case TARGET_F_GET_SEALS:
6866         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6867         break;
6868 
6869     default:
6870         ret = get_errno(safe_fcntl(fd, cmd, arg));
6871         break;
6872     }
6873     return ret;
6874 }
6875 
6876 #ifdef USE_UID16
6877 
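/* Helpers for the legacy 16-bit UID/GID syscalls: IDs above 65535 are
   reported as the overflow value 65534, and a 16-bit -1 (the "no change"
   marker) must stay -1 when widened. */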
6878 static inline int high2lowuid(int uid)
6879 {
6880     if (uid > 65535)
6881         return 65534;
6882     else
6883         return uid;
6884 }
6885 
6886 static inline int high2lowgid(int gid)
6887 {
6888     if (gid > 65535)
6889         return 65534;
6890     else
6891         return gid;
6892 }
6893 
6894 static inline int low2highuid(int uid)
6895 {
6896     if ((int16_t)uid == -1)
6897         return -1;
6898     else
6899         return uid;
6900 }
6901 
6902 static inline int low2highgid(int gid)
6903 {
6904     if ((int16_t)gid == -1)
6905         return -1;
6906     else
6907         return gid;
6908 }
6909 static inline int tswapid(int id)
6910 {
6911     return tswap16(id);
6912 }
6913 
6914 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6915 
6916 #else /* !USE_UID16 */
6917 static inline int high2lowuid(int uid)
6918 {
6919     return uid;
6920 }
6921 static inline int high2lowgid(int gid)
6922 {
6923     return gid;
6924 }
6925 static inline int low2highuid(int uid)
6926 {
6927     return uid;
6928 }
6929 static inline int low2highgid(int gid)
6930 {
6931     return gid;
6932 }
6933 static inline int tswapid(int id)
6934 {
6935     return tswap32(id);
6936 }
6937 
6938 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6939 
6940 #endif /* USE_UID16 */
6941 
6942 /* We must do direct syscalls for setting UID/GID, because we want to
6943  * implement the Linux system call semantics of "change only for this thread",
6944  * not the libc/POSIX semantics of "change for all threads in process".
6945  * (See http://ewontfix.com/17/ for more details.)
6946  * We use the 32-bit version of the syscalls if present; if it is not
6947  * then either the host architecture supports 32-bit UIDs natively with
6948  * the standard syscall, or the 16-bit UID is the best we can do.
6949  */
6950 #ifdef __NR_setuid32
6951 #define __NR_sys_setuid __NR_setuid32
6952 #else
6953 #define __NR_sys_setuid __NR_setuid
6954 #endif
6955 #ifdef __NR_setgid32
6956 #define __NR_sys_setgid __NR_setgid32
6957 #else
6958 #define __NR_sys_setgid __NR_setgid
6959 #endif
6960 #ifdef __NR_setresuid32
6961 #define __NR_sys_setresuid __NR_setresuid32
6962 #else
6963 #define __NR_sys_setresuid __NR_setresuid
6964 #endif
6965 #ifdef __NR_setresgid32
6966 #define __NR_sys_setresgid __NR_setresgid32
6967 #else
6968 #define __NR_sys_setresgid __NR_setresgid
6969 #endif
6970 
6971 _syscall1(int, sys_setuid, uid_t, uid)
6972 _syscall1(int, sys_setgid, gid_t, gid)
6973 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6974 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6975 
6976 void syscall_init(void)
6977 {
6978     IOCTLEntry *ie;
6979     const argtype *arg_type;
6980     int size;
6981 
6982     thunk_init(STRUCT_MAX);
6983 
6984 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6985 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6986 #include "syscall_types.h"
6987 #undef STRUCT
6988 #undef STRUCT_SPECIAL
6989 
6990     /* Patch the ioctl size if necessary.  We rely on the fact that no
6991        real ioctl has all bits of the size field set to '1'. */
6992     ie = ioctl_entries;
6993     while (ie->target_cmd != 0) {
6994         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6995             TARGET_IOC_SIZEMASK) {
6996             arg_type = ie->arg_type;
6997             if (arg_type[0] != TYPE_PTR) {
6998                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6999                         ie->target_cmd);
7000                 exit(1);
7001             }
7002             arg_type++;
7003             size = thunk_type_size(arg_type, 0);
7004             ie->target_cmd = (ie->target_cmd &
7005                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7006                 (size << TARGET_IOC_SIZESHIFT);
7007         }
7008 
7009         /* automatic consistency check if same arch */
7010 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7011     (defined(__x86_64__) && defined(TARGET_X86_64))
7012         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7013             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7014                     ie->name, ie->target_cmd, ie->host_cmd);
7015         }
7016 #endif
7017         ie++;
7018     }
7019 }
7020 
7021 #ifdef TARGET_NR_truncate64
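/* On targets that pass 64-bit values in aligned register pairs, the two
   halves of the offset arrive one argument slot later, so shift them down
   before combining them with target_offset64().  The same applies to
   target_ftruncate64() below. */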
7022 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7023                                          abi_long arg2,
7024                                          abi_long arg3,
7025                                          abi_long arg4)
7026 {
7027     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7028         arg2 = arg3;
7029         arg3 = arg4;
7030     }
7031     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7032 }
7033 #endif
7034 
7035 #ifdef TARGET_NR_ftruncate64
7036 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7037                                           abi_long arg2,
7038                                           abi_long arg3,
7039                                           abi_long arg4)
7040 {
7041     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7042         arg2 = arg3;
7043         arg3 = arg4;
7044     }
7045     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7046 }
7047 #endif
7048 
7049 #if defined(TARGET_NR_timer_settime) || \
7050     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7051 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7052                                                  abi_ulong target_addr)
7053 {
7054     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7055                                 offsetof(struct target_itimerspec,
7056                                          it_interval)) ||
7057         target_to_host_timespec(&host_its->it_value, target_addr +
7058                                 offsetof(struct target_itimerspec,
7059                                          it_value))) {
7060         return -TARGET_EFAULT;
7061     }
7062 
7063     return 0;
7064 }
7065 #endif
7066 
7067 #if defined(TARGET_NR_timer_settime64) || \
7068     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7069 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7070                                                    abi_ulong target_addr)
7071 {
7072     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7073                                   offsetof(struct target__kernel_itimerspec,
7074                                            it_interval)) ||
7075         target_to_host_timespec64(&host_its->it_value, target_addr +
7076                                   offsetof(struct target__kernel_itimerspec,
7077                                            it_value))) {
7078         return -TARGET_EFAULT;
7079     }
7080 
7081     return 0;
7082 }
7083 #endif
7084 
7085 #if ((defined(TARGET_NR_timerfd_gettime) || \
7086       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7087       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7088 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7089                                                  struct itimerspec *host_its)
7090 {
7091     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7092                                                        it_interval),
7093                                 &host_its->it_interval) ||
7094         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7095                                                        it_value),
7096                                 &host_its->it_value)) {
7097         return -TARGET_EFAULT;
7098     }
7099     return 0;
7100 }
7101 #endif
7102 
7103 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7104       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7105       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7106 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7107                                                    struct itimerspec *host_its)
7108 {
7109     if (host_to_target_timespec64(target_addr +
7110                                   offsetof(struct target__kernel_itimerspec,
7111                                            it_interval),
7112                                   &host_its->it_interval) ||
7113         host_to_target_timespec64(target_addr +
7114                                   offsetof(struct target__kernel_itimerspec,
7115                                            it_value),
7116                                   &host_its->it_value)) {
7117         return -TARGET_EFAULT;
7118     }
7119     return 0;
7120 }
7121 #endif
7122 
7123 #if defined(TARGET_NR_adjtimex) || \
7124     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7125 static inline abi_long target_to_host_timex(struct timex *host_tx,
7126                                             abi_long target_addr)
7127 {
7128     struct target_timex *target_tx;
7129 
7130     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7131         return -TARGET_EFAULT;
7132     }
7133 
7134     __get_user(host_tx->modes, &target_tx->modes);
7135     __get_user(host_tx->offset, &target_tx->offset);
7136     __get_user(host_tx->freq, &target_tx->freq);
7137     __get_user(host_tx->maxerror, &target_tx->maxerror);
7138     __get_user(host_tx->esterror, &target_tx->esterror);
7139     __get_user(host_tx->status, &target_tx->status);
7140     __get_user(host_tx->constant, &target_tx->constant);
7141     __get_user(host_tx->precision, &target_tx->precision);
7142     __get_user(host_tx->tolerance, &target_tx->tolerance);
7143     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7144     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7145     __get_user(host_tx->tick, &target_tx->tick);
7146     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7147     __get_user(host_tx->jitter, &target_tx->jitter);
7148     __get_user(host_tx->shift, &target_tx->shift);
7149     __get_user(host_tx->stabil, &target_tx->stabil);
7150     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7151     __get_user(host_tx->calcnt, &target_tx->calcnt);
7152     __get_user(host_tx->errcnt, &target_tx->errcnt);
7153     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7154     __get_user(host_tx->tai, &target_tx->tai);
7155 
7156     unlock_user_struct(target_tx, target_addr, 0);
7157     return 0;
7158 }
7159 
7160 static inline abi_long host_to_target_timex(abi_long target_addr,
7161                                             struct timex *host_tx)
7162 {
7163     struct target_timex *target_tx;
7164 
7165     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7166         return -TARGET_EFAULT;
7167     }
7168 
7169     __put_user(host_tx->modes, &target_tx->modes);
7170     __put_user(host_tx->offset, &target_tx->offset);
7171     __put_user(host_tx->freq, &target_tx->freq);
7172     __put_user(host_tx->maxerror, &target_tx->maxerror);
7173     __put_user(host_tx->esterror, &target_tx->esterror);
7174     __put_user(host_tx->status, &target_tx->status);
7175     __put_user(host_tx->constant, &target_tx->constant);
7176     __put_user(host_tx->precision, &target_tx->precision);
7177     __put_user(host_tx->tolerance, &target_tx->tolerance);
7178     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7179     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7180     __put_user(host_tx->tick, &target_tx->tick);
7181     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7182     __put_user(host_tx->jitter, &target_tx->jitter);
7183     __put_user(host_tx->shift, &target_tx->shift);
7184     __put_user(host_tx->stabil, &target_tx->stabil);
7185     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7186     __put_user(host_tx->calcnt, &target_tx->calcnt);
7187     __put_user(host_tx->errcnt, &target_tx->errcnt);
7188     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7189     __put_user(host_tx->tai, &target_tx->tai);
7190 
7191     unlock_user_struct(target_tx, target_addr, 1);
7192     return 0;
7193 }
7194 #endif
7195 
7196 
7197 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7198 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7199                                               abi_long target_addr)
7200 {
7201     struct target__kernel_timex *target_tx;
7202 
7203     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7204                                  offsetof(struct target__kernel_timex,
7205                                           time))) {
7206         return -TARGET_EFAULT;
7207     }
7208 
7209     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7210         return -TARGET_EFAULT;
7211     }
7212 
7213     __get_user(host_tx->modes, &target_tx->modes);
7214     __get_user(host_tx->offset, &target_tx->offset);
7215     __get_user(host_tx->freq, &target_tx->freq);
7216     __get_user(host_tx->maxerror, &target_tx->maxerror);
7217     __get_user(host_tx->esterror, &target_tx->esterror);
7218     __get_user(host_tx->status, &target_tx->status);
7219     __get_user(host_tx->constant, &target_tx->constant);
7220     __get_user(host_tx->precision, &target_tx->precision);
7221     __get_user(host_tx->tolerance, &target_tx->tolerance);
7222     __get_user(host_tx->tick, &target_tx->tick);
7223     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7224     __get_user(host_tx->jitter, &target_tx->jitter);
7225     __get_user(host_tx->shift, &target_tx->shift);
7226     __get_user(host_tx->stabil, &target_tx->stabil);
7227     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7228     __get_user(host_tx->calcnt, &target_tx->calcnt);
7229     __get_user(host_tx->errcnt, &target_tx->errcnt);
7230     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7231     __get_user(host_tx->tai, &target_tx->tai);
7232 
7233     unlock_user_struct(target_tx, target_addr, 0);
7234     return 0;
7235 }
7236 
7237 static inline abi_long host_to_target_timex64(abi_long target_addr,
7238                                               struct timex *host_tx)
7239 {
7240     struct target__kernel_timex *target_tx;
7241 
7242    if (copy_to_user_timeval64(target_addr +
7243                               offsetof(struct target__kernel_timex, time),
7244                               &host_tx->time)) {
7245         return -TARGET_EFAULT;
7246     }
7247 
7248     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7249         return -TARGET_EFAULT;
7250     }
7251 
7252     __put_user(host_tx->modes, &target_tx->modes);
7253     __put_user(host_tx->offset, &target_tx->offset);
7254     __put_user(host_tx->freq, &target_tx->freq);
7255     __put_user(host_tx->maxerror, &target_tx->maxerror);
7256     __put_user(host_tx->esterror, &target_tx->esterror);
7257     __put_user(host_tx->status, &target_tx->status);
7258     __put_user(host_tx->constant, &target_tx->constant);
7259     __put_user(host_tx->precision, &target_tx->precision);
7260     __put_user(host_tx->tolerance, &target_tx->tolerance);
7261     __put_user(host_tx->tick, &target_tx->tick);
7262     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7263     __put_user(host_tx->jitter, &target_tx->jitter);
7264     __put_user(host_tx->shift, &target_tx->shift);
7265     __put_user(host_tx->stabil, &target_tx->stabil);
7266     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7267     __put_user(host_tx->calcnt, &target_tx->calcnt);
7268     __put_user(host_tx->errcnt, &target_tx->errcnt);
7269     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7270     __put_user(host_tx->tai, &target_tx->tai);
7271 
7272     unlock_user_struct(target_tx, target_addr, 1);
7273     return 0;
7274 }
7275 #endif
7276 
7277 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7278 #define sigev_notify_thread_id _sigev_un._tid
7279 #endif
7280 
7281 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7282                                                abi_ulong target_addr)
7283 {
7284     struct target_sigevent *target_sevp;
7285 
7286     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7287         return -TARGET_EFAULT;
7288     }
7289 
7290     /* This union is awkward on 64 bit systems because it has a 32 bit
7291      * integer and a pointer in it; we follow the conversion approach
7292      * used for handling sigval types in signal.c so the guest should get
7293      * the correct value back even if we did a 64 bit byteswap and it's
7294      * using the 32 bit integer.
7295      */
7296     host_sevp->sigev_value.sival_ptr =
7297         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7298     host_sevp->sigev_signo =
7299         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7300     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7301     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7302 
7303     unlock_user_struct(target_sevp, target_addr, 1);
7304     return 0;
7305 }
7306 
7307 #if defined(TARGET_NR_mlockall)
7308 static inline int target_to_host_mlockall_arg(int arg)
7309 {
7310     int result = 0;
7311 
7312     if (arg & TARGET_MCL_CURRENT) {
7313         result |= MCL_CURRENT;
7314     }
7315     if (arg & TARGET_MCL_FUTURE) {
7316         result |= MCL_FUTURE;
7317     }
7318 #ifdef MCL_ONFAULT
7319     if (arg & TARGET_MCL_ONFAULT) {
7320         result |= MCL_ONFAULT;
7321     }
7322 #endif
7323 
7324     return result;
7325 }
7326 #endif
7327 
7328 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7329      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7330      defined(TARGET_NR_newfstatat))
7331 static inline abi_long host_to_target_stat64(void *cpu_env,
7332                                              abi_ulong target_addr,
7333                                              struct stat *host_st)
7334 {
7335 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7336     if (((CPUARMState *)cpu_env)->eabi) {
7337         struct target_eabi_stat64 *target_st;
7338 
7339         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7340             return -TARGET_EFAULT;
7341         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7342         __put_user(host_st->st_dev, &target_st->st_dev);
7343         __put_user(host_st->st_ino, &target_st->st_ino);
7344 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7345         __put_user(host_st->st_ino, &target_st->__st_ino);
7346 #endif
7347         __put_user(host_st->st_mode, &target_st->st_mode);
7348         __put_user(host_st->st_nlink, &target_st->st_nlink);
7349         __put_user(host_st->st_uid, &target_st->st_uid);
7350         __put_user(host_st->st_gid, &target_st->st_gid);
7351         __put_user(host_st->st_rdev, &target_st->st_rdev);
7352         __put_user(host_st->st_size, &target_st->st_size);
7353         __put_user(host_st->st_blksize, &target_st->st_blksize);
7354         __put_user(host_st->st_blocks, &target_st->st_blocks);
7355         __put_user(host_st->st_atime, &target_st->target_st_atime);
7356         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7357         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7358 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7359         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7360         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7361         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7362 #endif
7363         unlock_user_struct(target_st, target_addr, 1);
7364     } else
7365 #endif
7366     {
7367 #if defined(TARGET_HAS_STRUCT_STAT64)
7368         struct target_stat64 *target_st;
7369 #else
7370         struct target_stat *target_st;
7371 #endif
7372 
7373         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7374             return -TARGET_EFAULT;
7375         memset(target_st, 0, sizeof(*target_st));
7376         __put_user(host_st->st_dev, &target_st->st_dev);
7377         __put_user(host_st->st_ino, &target_st->st_ino);
7378 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7379         __put_user(host_st->st_ino, &target_st->__st_ino);
7380 #endif
7381         __put_user(host_st->st_mode, &target_st->st_mode);
7382         __put_user(host_st->st_nlink, &target_st->st_nlink);
7383         __put_user(host_st->st_uid, &target_st->st_uid);
7384         __put_user(host_st->st_gid, &target_st->st_gid);
7385         __put_user(host_st->st_rdev, &target_st->st_rdev);
7386         /* XXX: better use of kernel struct */
7387         __put_user(host_st->st_size, &target_st->st_size);
7388         __put_user(host_st->st_blksize, &target_st->st_blksize);
7389         __put_user(host_st->st_blocks, &target_st->st_blocks);
7390         __put_user(host_st->st_atime, &target_st->target_st_atime);
7391         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7392         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7393 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7394         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7395         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7396         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7397 #endif
7398         unlock_user_struct(target_st, target_addr, 1);
7399     }
7400 
7401     return 0;
7402 }
7403 #endif
7404 
7405 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7406 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7407                                             abi_ulong target_addr)
7408 {
7409     struct target_statx *target_stx;
7410 
7411     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7412         return -TARGET_EFAULT;
7413     }
7414     memset(target_stx, 0, sizeof(*target_stx));
7415 
7416     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7417     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7418     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7419     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7420     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7421     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7422     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7423     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7424     __put_user(host_stx->stx_size, &target_stx->stx_size);
7425     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7426     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7427     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7428     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7429     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7430     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7431     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7432     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7433     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7434     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7435     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7436     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7437     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7438     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7439 
7440     unlock_user_struct(target_stx, target_addr, 1);
7441 
7442     return 0;
7443 }
7444 #endif
7445 
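/*
 * Pick the host futex syscall: 64-bit hosts always have a 64-bit time_t,
 * while 32-bit hosts prefer __NR_futex_time64 when the host timespec has a
 * 64-bit tv_sec and otherwise fall back to the old __NR_futex.
 */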
7446 static int do_sys_futex(int *uaddr, int op, int val,
7447                          const struct timespec *timeout, int *uaddr2,
7448                          int val3)
7449 {
7450 #if HOST_LONG_BITS == 64
7451 #if defined(__NR_futex)
7452     /* The host time_t is always 64-bit, so no _time64 variant exists. */
7453     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7454 
7455 #endif
7456 #else /* HOST_LONG_BITS == 64 */
7457 #if defined(__NR_futex_time64)
7458     if (sizeof(timeout->tv_sec) == 8) {
7459         /* _time64 function on 32bit arch */
7460         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7461     }
7462 #endif
7463 #if defined(__NR_futex)
7464     /* old function on 32bit arch */
7465     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7466 #endif
7467 #endif /* HOST_LONG_BITS == 64 */
7468     g_assert_not_reached();
7469 }
7470 
7471 static int do_safe_futex(int *uaddr, int op, int val,
7472                          const struct timespec *timeout, int *uaddr2,
7473                          int val3)
7474 {
7475 #if HOST_LONG_BITS == 64
7476 #if defined(__NR_futex)
7477     /* The host time_t is always 64-bit, so no _time64 variant exists. */
7478     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7479 #endif
7480 #else /* HOST_LONG_BITS == 64 */
7481 #if defined(__NR_futex_time64)
7482     if (sizeof(timeout->tv_sec) == 8) {
7483         /* _time64 function on 32bit arch */
7484         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7485                                            val3));
7486     }
7487 #endif
7488 #if defined(__NR_futex)
7489     /* old function on 32bit arch */
7490     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7491 #endif
7492 #endif /* HOST_LONG_BITS == 64 */
7493     return -TARGET_ENOSYS;
7494 }
7495 
7496 /* ??? Using host futex calls even when target atomic operations
7497    are not really atomic probably breaks things.  However, implementing
7498    futexes locally would make futexes shared between multiple processes
7499    tricky, and they would probably be useless anyway because guest atomic
7500    operations won't work either.  */
7501 #if defined(TARGET_NR_futex)
7502 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7503                     target_ulong timeout, target_ulong uaddr2, int val3)
7504 {
7505     struct timespec ts, *pts;
7506     int base_op;
7507 
7508     /* ??? We assume FUTEX_* constants are the same on both host
7509        and target.  */
7510 #ifdef FUTEX_CMD_MASK
7511     base_op = op & FUTEX_CMD_MASK;
7512 #else
7513     base_op = op;
7514 #endif
7515     switch (base_op) {
7516     case FUTEX_WAIT:
7517     case FUTEX_WAIT_BITSET:
7518         if (timeout) {
7519             pts = &ts;
7520             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7521         } else {
7522             pts = NULL;
7523         }
7524         return do_safe_futex(g2h(cpu, uaddr),
7525                              op, tswap32(val), pts, NULL, val3);
7526     case FUTEX_WAKE:
7527         return do_safe_futex(g2h(cpu, uaddr),
7528                              op, val, NULL, NULL, 0);
7529     case FUTEX_FD:
7530         return do_safe_futex(g2h(cpu, uaddr),
7531                              op, val, NULL, NULL, 0);
7532     case FUTEX_REQUEUE:
7533     case FUTEX_CMP_REQUEUE:
7534     case FUTEX_WAKE_OP:
7535         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7536            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7537            But the prototype takes a `struct timespec *'; insert casts
7538            to satisfy the compiler.  We do not need to tswap TIMEOUT
7539            since it's not compared to guest memory.  */
7540         pts = (struct timespec *)(uintptr_t) timeout;
7541         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7542                              (base_op == FUTEX_CMP_REQUEUE
7543                               ? tswap32(val3) : val3));
7544     default:
7545         return -TARGET_ENOSYS;
7546     }
7547 }
7548 #endif
7549 
7550 #if defined(TARGET_NR_futex_time64)
7551 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7552                            int val, target_ulong timeout,
7553                            target_ulong uaddr2, int val3)
7554 {
7555     struct timespec ts, *pts;
7556     int base_op;
7557 
7558     /* ??? We assume FUTEX_* constants are the same on both host
7559        and target.  */
7560 #ifdef FUTEX_CMD_MASK
7561     base_op = op & FUTEX_CMD_MASK;
7562 #else
7563     base_op = op;
7564 #endif
7565     switch (base_op) {
7566     case FUTEX_WAIT:
7567     case FUTEX_WAIT_BITSET:
7568         if (timeout) {
7569             pts = &ts;
7570             if (target_to_host_timespec64(pts, timeout)) {
7571                 return -TARGET_EFAULT;
7572             }
7573         } else {
7574             pts = NULL;
7575         }
7576         return do_safe_futex(g2h(cpu, uaddr), op,
7577                              tswap32(val), pts, NULL, val3);
7578     case FUTEX_WAKE:
7579         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7580     case FUTEX_FD:
7581         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7582     case FUTEX_REQUEUE:
7583     case FUTEX_CMP_REQUEUE:
7584     case FUTEX_WAKE_OP:
7585         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7586            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7587            But the prototype takes a `struct timespec *'; insert casts
7588            to satisfy the compiler.  We do not need to tswap TIMEOUT
7589            since it's not compared to guest memory.  */
7590         pts = (struct timespec *)(uintptr_t) timeout;
7591         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7592                              (base_op == FUTEX_CMP_REQUEUE
7593                               ? tswap32(val3) : val3));
7594     default:
7595         return -TARGET_ENOSYS;
7596     }
7597 }
7598 #endif
7599 
7600 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7601 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7602                                      abi_long handle, abi_long mount_id,
7603                                      abi_long flags)
7604 {
7605     struct file_handle *target_fh;
7606     struct file_handle *fh;
7607     int mid = 0;
7608     abi_long ret;
7609     char *name;
7610     unsigned int size, total_size;
7611 
7612     if (get_user_s32(size, handle)) {
7613         return -TARGET_EFAULT;
7614     }
7615 
7616     name = lock_user_string(pathname);
7617     if (!name) {
7618         return -TARGET_EFAULT;
7619     }
7620 
7621     total_size = sizeof(struct file_handle) + size;
7622     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7623     if (!target_fh) {
7624         unlock_user(name, pathname, 0);
7625         return -TARGET_EFAULT;
7626     }
7627 
7628     fh = g_malloc0(total_size);
7629     fh->handle_bytes = size;
7630 
7631     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7632     unlock_user(name, pathname, 0);
7633 
7634     /* man name_to_handle_at(2):
7635      * Other than the use of the handle_bytes field, the caller should treat
7636      * the file_handle structure as an opaque data type
7637      */
7638 
7639     memcpy(target_fh, fh, total_size);
7640     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7641     target_fh->handle_type = tswap32(fh->handle_type);
7642     g_free(fh);
7643     unlock_user(target_fh, handle, total_size);
7644 
7645     if (put_user_s32(mid, mount_id)) {
7646         return -TARGET_EFAULT;
7647     }
7648 
7649     return ret;
7650 
7651 }
7652 #endif
7653 
7654 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
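/*
 * Emulate open_by_handle_at(): duplicate the guest file_handle into host
 * memory, fix up handle_bytes/handle_type for host byte order, convert
 * the open flags and pass everything to the host syscall.
 */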
7655 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7656                                      abi_long flags)
7657 {
7658     struct file_handle *target_fh;
7659     struct file_handle *fh;
7660     unsigned int size, total_size;
7661     abi_long ret;
7662 
7663     if (get_user_s32(size, handle)) {
7664         return -TARGET_EFAULT;
7665     }
7666 
7667     total_size = sizeof(struct file_handle) + size;
7668     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7669     if (!target_fh) {
7670         return -TARGET_EFAULT;
7671     }
7672 
7673     fh = g_memdup(target_fh, total_size);
7674     fh->handle_bytes = size;
7675     fh->handle_type = tswap32(target_fh->handle_type);
7676 
7677     ret = get_errno(open_by_handle_at(mount_fd, fh,
7678                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7679 
7680     g_free(fh);
7681 
7682     unlock_user(target_fh, handle, total_size);
7683 
7684     return ret;
7685 }
7686 #endif
7687 
7688 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7689 
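/*
 * Emulate signalfd4(): convert the guest signal mask and flags to their
 * host representation, call signalfd(), and register the signalfd fd
 * translator so that data read back from the new descriptor is converted
 * to the target layout.
 */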
7690 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7691 {
7692     int host_flags;
7693     target_sigset_t *target_mask;
7694     sigset_t host_mask;
7695     abi_long ret;
7696 
7697     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7698         return -TARGET_EINVAL;
7699     }
7700     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7701         return -TARGET_EFAULT;
7702     }
7703 
7704     target_to_host_sigset(&host_mask, target_mask);
7705 
7706     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7707 
7708     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7709     if (ret >= 0) {
7710         fd_trans_register(ret, &target_signalfd_trans);
7711     }
7712 
7713     unlock_user_struct(target_mask, mask, 0);
7714 
7715     return ret;
7716 }
7717 #endif
7718 
7719 /* Map host to target signal numbers for the wait family of syscalls.
7720    Assume all other status bits are the same.  */
7721 int host_to_target_waitstatus(int status)
7722 {
7723     if (WIFSIGNALED(status)) {
7724         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7725     }
7726     if (WIFSTOPPED(status)) {
7727         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7728                | (status & 0xff);
7729     }
7730     return status;
7731 }
7732 
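/*
 * Fake /proc/self/cmdline: write the guest's original argv[] strings,
 * each including its trailing NUL, to the given fd.
 */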
7733 static int open_self_cmdline(void *cpu_env, int fd)
7734 {
7735     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7736     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7737     int i;
7738 
7739     for (i = 0; i < bprm->argc; i++) {
7740         size_t len = strlen(bprm->argv[i]) + 1;
7741 
7742         if (write(fd, bprm->argv[i], len) != len) {
7743             return -1;
7744         }
7745     }
7746 
7747     return 0;
7748 }
7749 
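/*
 * Fake /proc/self/maps: walk the host mappings and emit only the ranges
 * that are valid guest addresses, translated back into guest addresses,
 * labelling the guest stack as "[stack]" and, where configured, appending
 * a [vsyscall] entry.
 */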
7750 static int open_self_maps(void *cpu_env, int fd)
7751 {
7752     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7753     TaskState *ts = cpu->opaque;
7754     GSList *map_info = read_self_maps();
7755     GSList *s;
7756     int count;
7757 
7758     for (s = map_info; s; s = g_slist_next(s)) {
7759         MapInfo *e = (MapInfo *) s->data;
7760 
7761         if (h2g_valid(e->start)) {
7762             unsigned long min = e->start;
7763             unsigned long max = e->end;
7764             int flags = page_get_flags(h2g(min));
7765             const char *path;
7766 
7767             max = h2g_valid(max - 1) ?
7768                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7769 
7770             if (page_check_range(h2g(min), max - min, flags) == -1) {
7771                 continue;
7772             }
7773 
7774             if (h2g(min) == ts->info->stack_limit) {
7775                 path = "[stack]";
7776             } else {
7777                 path = e->path;
7778             }
7779 
7780             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7781                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7782                             h2g(min), h2g(max - 1) + 1,
7783                             (flags & PAGE_READ) ? 'r' : '-',
7784                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7785                             (flags & PAGE_EXEC) ? 'x' : '-',
7786                             e->is_priv ? 'p' : '-',
7787                             (uint64_t) e->offset, e->dev, e->inode);
7788             if (path) {
7789                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7790             } else {
7791                 dprintf(fd, "\n");
7792             }
7793         }
7794     }
7795 
7796     free_self_maps(map_info);
7797 
7798 #ifdef TARGET_VSYSCALL_PAGE
7799     /*
7800      * We only support execution from the vsyscall page.
7801      * This is as if the kernel had CONFIG_LEGACY_VSYSCALL_XONLY=y (added in v5.3).
7802      */
7803     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7804                     " --xp 00000000 00:00 0",
7805                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7806     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7807 #endif
7808 
7809     return 0;
7810 }
7811 
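/*
 * Fake /proc/self/stat: only the pid, comm, ppid and start-of-stack
 * fields are filled in; every other field reads as 0.
 */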
7812 static int open_self_stat(void *cpu_env, int fd)
7813 {
7814     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7815     TaskState *ts = cpu->opaque;
7816     g_autoptr(GString) buf = g_string_new(NULL);
7817     int i;
7818 
7819     for (i = 0; i < 44; i++) {
7820         if (i == 0) {
7821             /* pid */
7822             g_string_printf(buf, FMT_pid " ", getpid());
7823         } else if (i == 1) {
7824             /* app name */
7825             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7826             bin = bin ? bin + 1 : ts->bprm->argv[0];
7827             g_string_printf(buf, "(%.15s) ", bin);
7828         } else if (i == 3) {
7829             /* ppid */
7830             g_string_printf(buf, FMT_pid " ", getppid());
7831         } else if (i == 27) {
7832             /* stack bottom */
7833             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7834         } else {
7835             /* the remaining fields are not emulated and read as 0 */
7836             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7837         }
7838 
7839         if (write(fd, buf->str, buf->len) != buf->len) {
7840             return -1;
7841         }
7842     }
7843 
7844     return 0;
7845 }
7846 
7847 static int open_self_auxv(void *cpu_env, int fd)
7848 {
7849     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7850     TaskState *ts = cpu->opaque;
7851     abi_ulong auxv = ts->info->saved_auxv;
7852     abi_ulong len = ts->info->auxv_len;
7853     char *ptr;
7854 
7855     /*
7856      * The auxiliary vector is stored on the target process stack.
7857      * Read the whole vector and copy it into the file.
7858      */
7859     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7860     if (ptr != NULL) {
7861         while (len > 0) {
7862             ssize_t r;
7863             r = write(fd, ptr, len);
7864             if (r <= 0) {
7865                 break;
7866             }
7867             len -= r;
7868             ptr += r;
7869         }
7870         lseek(fd, 0, SEEK_SET);
7871         unlock_user(ptr, auxv, len);
7872     }
7873 
7874     return 0;
7875 }
7876 
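/*
 * Return non-zero if FILENAME names ENTRY under /proc for the current
 * process, i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY".
 */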
7877 static int is_proc_myself(const char *filename, const char *entry)
7878 {
7879     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7880         filename += strlen("/proc/");
7881         if (!strncmp(filename, "self/", strlen("self/"))) {
7882             filename += strlen("self/");
7883         } else if (*filename >= '1' && *filename <= '9') {
7884             char myself[80];
7885             snprintf(myself, sizeof(myself), "%d/", getpid());
7886             if (!strncmp(filename, myself, strlen(myself))) {
7887                 filename += strlen(myself);
7888             } else {
7889                 return 0;
7890             }
7891         } else {
7892             return 0;
7893         }
7894         if (!strcmp(filename, entry)) {
7895             return 1;
7896         }
7897     }
7898     return 0;
7899 }
7900 
7901 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7902     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7903 static int is_proc(const char *filename, const char *entry)
7904 {
7905     return strcmp(filename, entry) == 0;
7906 }
7907 #endif
7908 
7909 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
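/*
 * Fake /proc/net/route for cross-endian configurations: copy the host
 * file but byte-swap the destination, gateway and netmask columns so the
 * guest sees them in its own byte order.
 */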
7910 static int open_net_route(void *cpu_env, int fd)
7911 {
7912     FILE *fp;
7913     char *line = NULL;
7914     size_t len = 0;
7915     ssize_t read;
7916 
7917     fp = fopen("/proc/net/route", "r");
7918     if (fp == NULL) {
7919         return -1;
7920     }
7921 
7922     /* read header */
7923 
7924     read = getline(&line, &len, fp);
7925     dprintf(fd, "%s", line);
7926 
7927     /* read routes */
7928 
7929     while ((read = getline(&line, &len, fp)) != -1) {
7930         char iface[16];
7931         uint32_t dest, gw, mask;
7932         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7933         int fields;
7934 
7935         fields = sscanf(line,
7936                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7937                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7938                         &mask, &mtu, &window, &irtt);
7939         if (fields != 11) {
7940             continue;
7941         }
7942         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7943                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7944                 metric, tswap32(mask), mtu, window, irtt);
7945     }
7946 
7947     free(line);
7948     fclose(fp);
7949 
7950     return 0;
7951 }
7952 #endif
7953 
7954 #if defined(TARGET_SPARC)
7955 static int open_cpuinfo(void *cpu_env, int fd)
7956 {
7957     dprintf(fd, "type\t\t: sun4u\n");
7958     return 0;
7959 }
7960 #endif
7961 
7962 #if defined(TARGET_HPPA)
7963 static int open_cpuinfo(void *cpu_env, int fd)
7964 {
7965     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7966     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7967     dprintf(fd, "capabilities\t: os32\n");
7968     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7969     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7970     return 0;
7971 }
7972 #endif
7973 
7974 #if defined(TARGET_M68K)
7975 static int open_hardware(void *cpu_env, int fd)
7976 {
7977     dprintf(fd, "Model:\t\tqemu-m68k\n");
7978     return 0;
7979 }
7980 #endif
7981 
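/*
 * openat() emulation.  A table of "fake" /proc files is consulted first:
 * when the pathname matches, the corresponding fill function writes
 * synthesized contents into an unlinked temporary file and that
 * descriptor is returned instead.  /proc/self/exe is redirected to the
 * real executable (via AT_EXECFD or exec_path); everything else goes to
 * safe_openat().
 */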
7982 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7983 {
7984     struct fake_open {
7985         const char *filename;
7986         int (*fill)(void *cpu_env, int fd);
7987         int (*cmp)(const char *s1, const char *s2);
7988     };
7989     const struct fake_open *fake_open;
7990     static const struct fake_open fakes[] = {
7991         { "maps", open_self_maps, is_proc_myself },
7992         { "stat", open_self_stat, is_proc_myself },
7993         { "auxv", open_self_auxv, is_proc_myself },
7994         { "cmdline", open_self_cmdline, is_proc_myself },
7995 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7996         { "/proc/net/route", open_net_route, is_proc },
7997 #endif
7998 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7999         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8000 #endif
8001 #if defined(TARGET_M68K)
8002         { "/proc/hardware", open_hardware, is_proc },
8003 #endif
8004         { NULL, NULL, NULL }
8005     };
8006 
8007     if (is_proc_myself(pathname, "exe")) {
8008         int execfd = qemu_getauxval(AT_EXECFD);
8009         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8010     }
8011 
8012     for (fake_open = fakes; fake_open->filename; fake_open++) {
8013         if (fake_open->cmp(pathname, fake_open->filename)) {
8014             break;
8015         }
8016     }
8017 
8018     if (fake_open->filename) {
8019         const char *tmpdir;
8020         char filename[PATH_MAX];
8021         int fd, r;
8022 
8023         /* create a temporary file to hold the synthesized /proc contents */
8024         tmpdir = getenv("TMPDIR");
8025         if (!tmpdir)
8026             tmpdir = "/tmp";
8027         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8028         fd = mkstemp(filename);
8029         if (fd < 0) {
8030             return fd;
8031         }
8032         unlink(filename);
8033 
8034         if ((r = fake_open->fill(cpu_env, fd))) {
8035             int e = errno;
8036             close(fd);
8037             errno = e;
8038             return r;
8039         }
8040         lseek(fd, 0, SEEK_SET);
8041 
8042         return fd;
8043     }
8044 
8045     return safe_openat(dirfd, path(pathname), flags, mode);
8046 }
8047 
8048 #define TIMER_MAGIC 0x0caf0000
8049 #define TIMER_MAGIC_MASK 0xffff0000
8050 
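/*
 * Guest-visible timer IDs carry TIMER_MAGIC in the upper 16 bits and the
 * index into g_posix_timers in the lower 16 bits, so e.g. index 3 is seen
 * by the guest as 0x0caf0003.
 */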
8051 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8052 static target_timer_t get_timer_id(abi_long arg)
8053 {
8054     target_timer_t timerid = arg;
8055 
8056     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8057         return -TARGET_EINVAL;
8058     }
8059 
8060     timerid &= 0xffff;
8061 
8062     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8063         return -TARGET_EINVAL;
8064     }
8065 
8066     return timerid;
8067 }
8068 
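/*
 * Copy a CPU affinity mask from guest memory into a host mask, converting
 * from abi_ulong-sized words in guest byte order to host unsigned longs.
 */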
8069 static int target_to_host_cpu_mask(unsigned long *host_mask,
8070                                    size_t host_size,
8071                                    abi_ulong target_addr,
8072                                    size_t target_size)
8073 {
8074     unsigned target_bits = sizeof(abi_ulong) * 8;
8075     unsigned host_bits = sizeof(*host_mask) * 8;
8076     abi_ulong *target_mask;
8077     unsigned i, j;
8078 
8079     assert(host_size >= target_size);
8080 
8081     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8082     if (!target_mask) {
8083         return -TARGET_EFAULT;
8084     }
8085     memset(host_mask, 0, host_size);
8086 
8087     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8088         unsigned bit = i * target_bits;
8089         abi_ulong val;
8090 
8091         __get_user(val, &target_mask[i]);
8092         for (j = 0; j < target_bits; j++, bit++) {
8093             if (val & (1UL << j)) {
8094                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8095             }
8096         }
8097     }
8098 
8099     unlock_user(target_mask, target_addr, 0);
8100     return 0;
8101 }
8102 
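/*
 * The reverse of target_to_host_cpu_mask(): copy a host CPU affinity mask
 * out to guest memory as abi_ulong-sized words in guest byte order.
 */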
8103 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8104                                    size_t host_size,
8105                                    abi_ulong target_addr,
8106                                    size_t target_size)
8107 {
8108     unsigned target_bits = sizeof(abi_ulong) * 8;
8109     unsigned host_bits = sizeof(*host_mask) * 8;
8110     abi_ulong *target_mask;
8111     unsigned i, j;
8112 
8113     assert(host_size >= target_size);
8114 
8115     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8116     if (!target_mask) {
8117         return -TARGET_EFAULT;
8118     }
8119 
8120     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8121         unsigned bit = i * target_bits;
8122         abi_ulong val = 0;
8123 
8124         for (j = 0; j < target_bits; j++, bit++) {
8125             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8126                 val |= 1UL << j;
8127             }
8128         }
8129         __put_user(val, &target_mask[i]);
8130     }
8131 
8132     unlock_user(target_mask, target_addr, target_size);
8133     return 0;
8134 }
8135 
8136 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8137 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8138 #endif
8139 
8140 /* This is an internal helper for do_syscall so that it is easier
8141  * to have a single return point, allowing actions such as logging
8142  * of syscall results to be performed there.
8143  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8144  */
8145 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8146                             abi_long arg2, abi_long arg3, abi_long arg4,
8147                             abi_long arg5, abi_long arg6, abi_long arg7,
8148                             abi_long arg8)
8149 {
8150     CPUState *cpu = env_cpu(cpu_env);
8151     abi_long ret;
8152 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8153     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8154     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8155     || defined(TARGET_NR_statx)
8156     struct stat st;
8157 #endif
8158 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8159     || defined(TARGET_NR_fstatfs)
8160     struct statfs stfs;
8161 #endif
8162     void *p;
8163 
8164     switch(num) {
8165     case TARGET_NR_exit:
8166         /* In old applications this may be used to implement _exit(2).
8167            However in threaded applications it is used for thread termination,
8168            and _exit_group is used for application termination.
8169            Do thread termination if we have more than one thread.  */
8170 
8171         if (block_signals()) {
8172             return -TARGET_ERESTARTSYS;
8173         }
8174 
8175         pthread_mutex_lock(&clone_lock);
8176 
8177         if (CPU_NEXT(first_cpu)) {
8178             TaskState *ts = cpu->opaque;
8179 
8180             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8181             object_unref(OBJECT(cpu));
8182             /*
8183              * At this point the CPU should be unrealized and removed
8184              * from cpu lists. We can clean-up the rest of the thread
8185              * data without the lock held.
8186              */
8187 
8188             pthread_mutex_unlock(&clone_lock);
8189 
8190             if (ts->child_tidptr) {
8191                 put_user_u32(0, ts->child_tidptr);
8192                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8193                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8194             }
8195             thread_cpu = NULL;
8196             g_free(ts);
8197             rcu_unregister_thread();
8198             pthread_exit(NULL);
8199         }
8200 
8201         pthread_mutex_unlock(&clone_lock);
8202         preexit_cleanup(cpu_env, arg1);
8203         _exit(arg1);
8204         return 0; /* avoid warning */
8205     case TARGET_NR_read:
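        /*
         * Zero-length reads with a NULL buffer are passed straight
         * through so the host kernel decides the result; Linux accepts
         * read(fd, NULL, 0), whereas lock_user() would fail the request
         * with EFAULT.
         */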
8206         if (arg2 == 0 && arg3 == 0) {
8207             return get_errno(safe_read(arg1, 0, 0));
8208         } else {
8209             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8210                 return -TARGET_EFAULT;
8211             ret = get_errno(safe_read(arg1, p, arg3));
8212             if (ret >= 0 &&
8213                 fd_trans_host_to_target_data(arg1)) {
8214                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8215             }
8216             unlock_user(p, arg2, ret);
8217         }
8218         return ret;
8219     case TARGET_NR_write:
8220         if (arg2 == 0 && arg3 == 0) {
8221             return get_errno(safe_write(arg1, 0, 0));
8222         }
8223         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8224             return -TARGET_EFAULT;
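        /*
         * If a data translator is registered for this fd, convert the
         * guest data in a bounce buffer before writing it to the host fd.
         */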
8225         if (fd_trans_target_to_host_data(arg1)) {
8226             void *copy = g_malloc(arg3);
8227             memcpy(copy, p, arg3);
8228             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8229             if (ret >= 0) {
8230                 ret = get_errno(safe_write(arg1, copy, ret));
8231             }
8232             g_free(copy);
8233         } else {
8234             ret = get_errno(safe_write(arg1, p, arg3));
8235         }
8236         unlock_user(p, arg2, 0);
8237         return ret;
8238 
8239 #ifdef TARGET_NR_open
8240     case TARGET_NR_open:
8241         if (!(p = lock_user_string(arg1)))
8242             return -TARGET_EFAULT;
8243         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8244                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8245                                   arg3));
8246         fd_trans_unregister(ret);
8247         unlock_user(p, arg1, 0);
8248         return ret;
8249 #endif
8250     case TARGET_NR_openat:
8251         if (!(p = lock_user_string(arg2)))
8252             return -TARGET_EFAULT;
8253         ret = get_errno(do_openat(cpu_env, arg1, p,
8254                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8255                                   arg4));
8256         fd_trans_unregister(ret);
8257         unlock_user(p, arg2, 0);
8258         return ret;
8259 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8260     case TARGET_NR_name_to_handle_at:
8261         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8262         return ret;
8263 #endif
8264 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8265     case TARGET_NR_open_by_handle_at:
8266         ret = do_open_by_handle_at(arg1, arg2, arg3);
8267         fd_trans_unregister(ret);
8268         return ret;
8269 #endif
8270     case TARGET_NR_close:
8271         fd_trans_unregister(arg1);
8272         return get_errno(close(arg1));
8273 
8274     case TARGET_NR_brk:
8275         return do_brk(arg1);
8276 #ifdef TARGET_NR_fork
8277     case TARGET_NR_fork:
8278         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8279 #endif
8280 #ifdef TARGET_NR_waitpid
8281     case TARGET_NR_waitpid:
8282         {
8283             int status;
8284             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8285             if (!is_error(ret) && arg2 && ret
8286                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8287                 return -TARGET_EFAULT;
8288         }
8289         return ret;
8290 #endif
8291 #ifdef TARGET_NR_waitid
8292     case TARGET_NR_waitid:
8293         {
8294             siginfo_t info;
8295             info.si_pid = 0;
8296             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8297             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8298                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8299                     return -TARGET_EFAULT;
8300                 host_to_target_siginfo(p, &info);
8301                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8302             }
8303         }
8304         return ret;
8305 #endif
8306 #ifdef TARGET_NR_creat /* not on alpha */
8307     case TARGET_NR_creat:
8308         if (!(p = lock_user_string(arg1)))
8309             return -TARGET_EFAULT;
8310         ret = get_errno(creat(p, arg2));
8311         fd_trans_unregister(ret);
8312         unlock_user(p, arg1, 0);
8313         return ret;
8314 #endif
8315 #ifdef TARGET_NR_link
8316     case TARGET_NR_link:
8317         {
8318             void * p2;
8319             p = lock_user_string(arg1);
8320             p2 = lock_user_string(arg2);
8321             if (!p || !p2)
8322                 ret = -TARGET_EFAULT;
8323             else
8324                 ret = get_errno(link(p, p2));
8325             unlock_user(p2, arg2, 0);
8326             unlock_user(p, arg1, 0);
8327         }
8328         return ret;
8329 #endif
8330 #if defined(TARGET_NR_linkat)
8331     case TARGET_NR_linkat:
8332         {
8333             void * p2 = NULL;
8334             if (!arg2 || !arg4)
8335                 return -TARGET_EFAULT;
8336             p  = lock_user_string(arg2);
8337             p2 = lock_user_string(arg4);
8338             if (!p || !p2)
8339                 ret = -TARGET_EFAULT;
8340             else
8341                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8342             unlock_user(p, arg2, 0);
8343             unlock_user(p2, arg4, 0);
8344         }
8345         return ret;
8346 #endif
8347 #ifdef TARGET_NR_unlink
8348     case TARGET_NR_unlink:
8349         if (!(p = lock_user_string(arg1)))
8350             return -TARGET_EFAULT;
8351         ret = get_errno(unlink(p));
8352         unlock_user(p, arg1, 0);
8353         return ret;
8354 #endif
8355 #if defined(TARGET_NR_unlinkat)
8356     case TARGET_NR_unlinkat:
8357         if (!(p = lock_user_string(arg2)))
8358             return -TARGET_EFAULT;
8359         ret = get_errno(unlinkat(arg1, p, arg3));
8360         unlock_user(p, arg2, 0);
8361         return ret;
8362 #endif
8363     case TARGET_NR_execve:
8364         {
8365             char **argp, **envp;
8366             int argc, envc;
8367             abi_ulong gp;
8368             abi_ulong guest_argp;
8369             abi_ulong guest_envp;
8370             abi_ulong addr;
8371             char **q;
8372             int total_size = 0;
8373 
8374             argc = 0;
8375             guest_argp = arg2;
8376             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8377                 if (get_user_ual(addr, gp))
8378                     return -TARGET_EFAULT;
8379                 if (!addr)
8380                     break;
8381                 argc++;
8382             }
8383             envc = 0;
8384             guest_envp = arg3;
8385             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8386                 if (get_user_ual(addr, gp))
8387                     return -TARGET_EFAULT;
8388                 if (!addr)
8389                     break;
8390                 envc++;
8391             }
8392 
8393             argp = g_new0(char *, argc + 1);
8394             envp = g_new0(char *, envc + 1);
8395 
8396             for (gp = guest_argp, q = argp; gp;
8397                   gp += sizeof(abi_ulong), q++) {
8398                 if (get_user_ual(addr, gp))
8399                     goto execve_efault;
8400                 if (!addr)
8401                     break;
8402                 if (!(*q = lock_user_string(addr)))
8403                     goto execve_efault;
8404                 total_size += strlen(*q) + 1;
8405             }
8406             *q = NULL;
8407 
8408             for (gp = guest_envp, q = envp; gp;
8409                   gp += sizeof(abi_ulong), q++) {
8410                 if (get_user_ual(addr, gp))
8411                     goto execve_efault;
8412                 if (!addr)
8413                     break;
8414                 if (!(*q = lock_user_string(addr)))
8415                     goto execve_efault;
8416                 total_size += strlen(*q) + 1;
8417             }
8418             *q = NULL;
8419 
8420             if (!(p = lock_user_string(arg1)))
8421                 goto execve_efault;
8422             /* Although execve() is not an interruptible syscall it is
8423              * a special case where we must use the safe_syscall wrapper:
8424              * if we allow a signal to happen before we make the host
8425              * syscall then we will 'lose' it, because at the point of
8426              * execve the process leaves QEMU's control. So we use the
8427              * safe syscall wrapper to ensure that we either take the
8428              * signal as a guest signal, or else it does not happen
8429              * before the execve completes and makes it the other
8430              * program's problem.
8431              */
8432             ret = get_errno(safe_execve(p, argp, envp));
8433             unlock_user(p, arg1, 0);
8434 
8435             goto execve_end;
8436 
8437         execve_efault:
8438             ret = -TARGET_EFAULT;
8439 
8440         execve_end:
8441             for (gp = guest_argp, q = argp; *q;
8442                   gp += sizeof(abi_ulong), q++) {
8443                 if (get_user_ual(addr, gp)
8444                     || !addr)
8445                     break;
8446                 unlock_user(*q, addr, 0);
8447             }
8448             for (gp = guest_envp, q = envp; *q;
8449                   gp += sizeof(abi_ulong), q++) {
8450                 if (get_user_ual(addr, gp)
8451                     || !addr)
8452                     break;
8453                 unlock_user(*q, addr, 0);
8454             }
8455 
8456             g_free(argp);
8457             g_free(envp);
8458         }
8459         return ret;
8460     case TARGET_NR_chdir:
8461         if (!(p = lock_user_string(arg1)))
8462             return -TARGET_EFAULT;
8463         ret = get_errno(chdir(p));
8464         unlock_user(p, arg1, 0);
8465         return ret;
8466 #ifdef TARGET_NR_time
8467     case TARGET_NR_time:
8468         {
8469             time_t host_time;
8470             ret = get_errno(time(&host_time));
8471             if (!is_error(ret)
8472                 && arg1
8473                 && put_user_sal(host_time, arg1))
8474                 return -TARGET_EFAULT;
8475         }
8476         return ret;
8477 #endif
8478 #ifdef TARGET_NR_mknod
8479     case TARGET_NR_mknod:
8480         if (!(p = lock_user_string(arg1)))
8481             return -TARGET_EFAULT;
8482         ret = get_errno(mknod(p, arg2, arg3));
8483         unlock_user(p, arg1, 0);
8484         return ret;
8485 #endif
8486 #if defined(TARGET_NR_mknodat)
8487     case TARGET_NR_mknodat:
8488         if (!(p = lock_user_string(arg2)))
8489             return -TARGET_EFAULT;
8490         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8491         unlock_user(p, arg2, 0);
8492         return ret;
8493 #endif
8494 #ifdef TARGET_NR_chmod
8495     case TARGET_NR_chmod:
8496         if (!(p = lock_user_string(arg1)))
8497             return -TARGET_EFAULT;
8498         ret = get_errno(chmod(p, arg2));
8499         unlock_user(p, arg1, 0);
8500         return ret;
8501 #endif
8502 #ifdef TARGET_NR_lseek
8503     case TARGET_NR_lseek:
8504         return get_errno(lseek(arg1, arg2, arg3));
8505 #endif
8506 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8507     /* Alpha specific */
8508     case TARGET_NR_getxpid:
8509         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8510         return get_errno(getpid());
8511 #endif
8512 #ifdef TARGET_NR_getpid
8513     case TARGET_NR_getpid:
8514         return get_errno(getpid());
8515 #endif
8516     case TARGET_NR_mount:
8517         {
8518             /* need to look at the data field */
8519             void *p2, *p3;
8520 
8521             if (arg1) {
8522                 p = lock_user_string(arg1);
8523                 if (!p) {
8524                     return -TARGET_EFAULT;
8525                 }
8526             } else {
8527                 p = NULL;
8528             }
8529 
8530             p2 = lock_user_string(arg2);
8531             if (!p2) {
8532                 if (arg1) {
8533                     unlock_user(p, arg1, 0);
8534                 }
8535                 return -TARGET_EFAULT;
8536             }
8537 
8538             if (arg3) {
8539                 p3 = lock_user_string(arg3);
8540                 if (!p3) {
8541                     if (arg1) {
8542                         unlock_user(p, arg1, 0);
8543                     }
8544                     unlock_user(p2, arg2, 0);
8545                     return -TARGET_EFAULT;
8546                 }
8547             } else {
8548                 p3 = NULL;
8549             }
8550 
8551             /* FIXME - arg5 should be locked, but it isn't clear how to
8552              * do that since it's not guaranteed to be a NULL-terminated
8553              * string.
8554              */
8555             if (!arg5) {
8556                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8557             } else {
8558                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8559             }
8560             ret = get_errno(ret);
8561 
8562             if (arg1) {
8563                 unlock_user(p, arg1, 0);
8564             }
8565             unlock_user(p2, arg2, 0);
8566             if (arg3) {
8567                 unlock_user(p3, arg3, 0);
8568             }
8569         }
8570         return ret;
8571 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8572 #if defined(TARGET_NR_umount)
8573     case TARGET_NR_umount:
8574 #endif
8575 #if defined(TARGET_NR_oldumount)
8576     case TARGET_NR_oldumount:
8577 #endif
8578         if (!(p = lock_user_string(arg1)))
8579             return -TARGET_EFAULT;
8580         ret = get_errno(umount(p));
8581         unlock_user(p, arg1, 0);
8582         return ret;
8583 #endif
8584 #ifdef TARGET_NR_stime /* not on alpha */
8585     case TARGET_NR_stime:
8586         {
8587             struct timespec ts;
8588             ts.tv_nsec = 0;
8589             if (get_user_sal(ts.tv_sec, arg1)) {
8590                 return -TARGET_EFAULT;
8591             }
8592             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8593         }
8594 #endif
8595 #ifdef TARGET_NR_alarm /* not on alpha */
8596     case TARGET_NR_alarm:
8597         return alarm(arg1);
8598 #endif
8599 #ifdef TARGET_NR_pause /* not on alpha */
8600     case TARGET_NR_pause:
8601         if (!block_signals()) {
8602             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8603         }
8604         return -TARGET_EINTR;
8605 #endif
8606 #ifdef TARGET_NR_utime
8607     case TARGET_NR_utime:
8608         {
8609             struct utimbuf tbuf, *host_tbuf;
8610             struct target_utimbuf *target_tbuf;
8611             if (arg2) {
8612                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8613                     return -TARGET_EFAULT;
8614                 tbuf.actime = tswapal(target_tbuf->actime);
8615                 tbuf.modtime = tswapal(target_tbuf->modtime);
8616                 unlock_user_struct(target_tbuf, arg2, 0);
8617                 host_tbuf = &tbuf;
8618             } else {
8619                 host_tbuf = NULL;
8620             }
8621             if (!(p = lock_user_string(arg1)))
8622                 return -TARGET_EFAULT;
8623             ret = get_errno(utime(p, host_tbuf));
8624             unlock_user(p, arg1, 0);
8625         }
8626         return ret;
8627 #endif
8628 #ifdef TARGET_NR_utimes
8629     case TARGET_NR_utimes:
8630         {
8631             struct timeval *tvp, tv[2];
8632             if (arg2) {
8633                 if (copy_from_user_timeval(&tv[0], arg2)
8634                     || copy_from_user_timeval(&tv[1],
8635                                               arg2 + sizeof(struct target_timeval)))
8636                     return -TARGET_EFAULT;
8637                 tvp = tv;
8638             } else {
8639                 tvp = NULL;
8640             }
8641             if (!(p = lock_user_string(arg1)))
8642                 return -TARGET_EFAULT;
8643             ret = get_errno(utimes(p, tvp));
8644             unlock_user(p, arg1, 0);
8645         }
8646         return ret;
8647 #endif
8648 #if defined(TARGET_NR_futimesat)
8649     case TARGET_NR_futimesat:
8650         {
8651             struct timeval *tvp, tv[2];
8652             if (arg3) {
8653                 if (copy_from_user_timeval(&tv[0], arg3)
8654                     || copy_from_user_timeval(&tv[1],
8655                                               arg3 + sizeof(struct target_timeval)))
8656                     return -TARGET_EFAULT;
8657                 tvp = tv;
8658             } else {
8659                 tvp = NULL;
8660             }
8661             if (!(p = lock_user_string(arg2))) {
8662                 return -TARGET_EFAULT;
8663             }
8664             ret = get_errno(futimesat(arg1, path(p), tvp));
8665             unlock_user(p, arg2, 0);
8666         }
8667         return ret;
8668 #endif
8669 #ifdef TARGET_NR_access
8670     case TARGET_NR_access:
8671         if (!(p = lock_user_string(arg1))) {
8672             return -TARGET_EFAULT;
8673         }
8674         ret = get_errno(access(path(p), arg2));
8675         unlock_user(p, arg1, 0);
8676         return ret;
8677 #endif
8678 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8679     case TARGET_NR_faccessat:
8680         if (!(p = lock_user_string(arg2))) {
8681             return -TARGET_EFAULT;
8682         }
8683         ret = get_errno(faccessat(arg1, p, arg3, 0));
8684         unlock_user(p, arg2, 0);
8685         return ret;
8686 #endif
8687 #ifdef TARGET_NR_nice /* not on alpha */
8688     case TARGET_NR_nice:
8689         return get_errno(nice(arg1));
8690 #endif
8691     case TARGET_NR_sync:
8692         sync();
8693         return 0;
8694 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8695     case TARGET_NR_syncfs:
8696         return get_errno(syncfs(arg1));
8697 #endif
8698     case TARGET_NR_kill:
8699         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8700 #ifdef TARGET_NR_rename
8701     case TARGET_NR_rename:
8702         {
8703             void *p2;
8704             p = lock_user_string(arg1);
8705             p2 = lock_user_string(arg2);
8706             if (!p || !p2)
8707                 ret = -TARGET_EFAULT;
8708             else
8709                 ret = get_errno(rename(p, p2));
8710             unlock_user(p2, arg2, 0);
8711             unlock_user(p, arg1, 0);
8712         }
8713         return ret;
8714 #endif
8715 #if defined(TARGET_NR_renameat)
8716     case TARGET_NR_renameat:
8717         {
8718             void *p2;
8719             p  = lock_user_string(arg2);
8720             p2 = lock_user_string(arg4);
8721             if (!p || !p2)
8722                 ret = -TARGET_EFAULT;
8723             else
8724                 ret = get_errno(renameat(arg1, p, arg3, p2));
8725             unlock_user(p2, arg4, 0);
8726             unlock_user(p, arg2, 0);
8727         }
8728         return ret;
8729 #endif
8730 #if defined(TARGET_NR_renameat2)
8731     case TARGET_NR_renameat2:
8732         {
8733             void *p2;
8734             p  = lock_user_string(arg2);
8735             p2 = lock_user_string(arg4);
8736             if (!p || !p2) {
8737                 ret = -TARGET_EFAULT;
8738             } else {
8739                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8740             }
8741             unlock_user(p2, arg4, 0);
8742             unlock_user(p, arg2, 0);
8743         }
8744         return ret;
8745 #endif
8746 #ifdef TARGET_NR_mkdir
8747     case TARGET_NR_mkdir:
8748         if (!(p = lock_user_string(arg1)))
8749             return -TARGET_EFAULT;
8750         ret = get_errno(mkdir(p, arg2));
8751         unlock_user(p, arg1, 0);
8752         return ret;
8753 #endif
8754 #if defined(TARGET_NR_mkdirat)
8755     case TARGET_NR_mkdirat:
8756         if (!(p = lock_user_string(arg2)))
8757             return -TARGET_EFAULT;
8758         ret = get_errno(mkdirat(arg1, p, arg3));
8759         unlock_user(p, arg2, 0);
8760         return ret;
8761 #endif
8762 #ifdef TARGET_NR_rmdir
8763     case TARGET_NR_rmdir:
8764         if (!(p = lock_user_string(arg1)))
8765             return -TARGET_EFAULT;
8766         ret = get_errno(rmdir(p));
8767         unlock_user(p, arg1, 0);
8768         return ret;
8769 #endif
8770     case TARGET_NR_dup:
8771         ret = get_errno(dup(arg1));
8772         if (ret >= 0) {
8773             fd_trans_dup(arg1, ret);
8774         }
8775         return ret;
8776 #ifdef TARGET_NR_pipe
8777     case TARGET_NR_pipe:
8778         return do_pipe(cpu_env, arg1, 0, 0);
8779 #endif
8780 #ifdef TARGET_NR_pipe2
8781     case TARGET_NR_pipe2:
8782         return do_pipe(cpu_env, arg1,
8783                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8784 #endif
8785     case TARGET_NR_times:
8786         {
8787             struct target_tms *tmsp;
8788             struct tms tms;
8789             ret = get_errno(times(&tms));
8790             if (arg1) {
8791                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8792                 if (!tmsp)
8793                     return -TARGET_EFAULT;
8794                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8795                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8796                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8797                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8798             }
8799             if (!is_error(ret))
8800                 ret = host_to_target_clock_t(ret);
8801         }
8802         return ret;
8803     case TARGET_NR_acct:
8804         if (arg1 == 0) {
8805             ret = get_errno(acct(NULL));
8806         } else {
8807             if (!(p = lock_user_string(arg1))) {
8808                 return -TARGET_EFAULT;
8809             }
8810             ret = get_errno(acct(path(p)));
8811             unlock_user(p, arg1, 0);
8812         }
8813         return ret;
8814 #ifdef TARGET_NR_umount2
8815     case TARGET_NR_umount2:
8816         if (!(p = lock_user_string(arg1)))
8817             return -TARGET_EFAULT;
8818         ret = get_errno(umount2(p, arg2));
8819         unlock_user(p, arg1, 0);
8820         return ret;
8821 #endif
8822     case TARGET_NR_ioctl:
8823         return do_ioctl(arg1, arg2, arg3);
8824 #ifdef TARGET_NR_fcntl
8825     case TARGET_NR_fcntl:
8826         return do_fcntl(arg1, arg2, arg3);
8827 #endif
8828     case TARGET_NR_setpgid:
8829         return get_errno(setpgid(arg1, arg2));
8830     case TARGET_NR_umask:
8831         return get_errno(umask(arg1));
8832     case TARGET_NR_chroot:
8833         if (!(p = lock_user_string(arg1)))
8834             return -TARGET_EFAULT;
8835         ret = get_errno(chroot(p));
8836         unlock_user(p, arg1, 0);
8837         return ret;
8838 #ifdef TARGET_NR_dup2
8839     case TARGET_NR_dup2:
8840         ret = get_errno(dup2(arg1, arg2));
8841         if (ret >= 0) {
8842             fd_trans_dup(arg1, arg2);
8843         }
8844         return ret;
8845 #endif
8846 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8847     case TARGET_NR_dup3:
8848     {
8849         int host_flags;
8850 
8851         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8852             return -TARGET_EINVAL;
8853         }
8854         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8855         ret = get_errno(dup3(arg1, arg2, host_flags));
8856         if (ret >= 0) {
8857             fd_trans_dup(arg1, arg2);
8858         }
8859         return ret;
8860     }
8861 #endif
8862 #ifdef TARGET_NR_getppid /* not on alpha */
8863     case TARGET_NR_getppid:
8864         return get_errno(getppid());
8865 #endif
8866 #ifdef TARGET_NR_getpgrp
8867     case TARGET_NR_getpgrp:
8868         return get_errno(getpgrp());
8869 #endif
8870     case TARGET_NR_setsid:
8871         return get_errno(setsid());
8872 #ifdef TARGET_NR_sigaction
8873     case TARGET_NR_sigaction:
8874         {
8875 #if defined(TARGET_MIPS)
8876             struct target_sigaction act, oact, *pact, *old_act;
8877 
8878             if (arg2) {
8879                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8880                     return -TARGET_EFAULT;
8881                 act._sa_handler = old_act->_sa_handler;
8882                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8883                 act.sa_flags = old_act->sa_flags;
8884                 unlock_user_struct(old_act, arg2, 0);
8885                 pact = &act;
8886             } else {
8887                 pact = NULL;
8888             }
8889 
8890             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8891 
8892             if (!is_error(ret) && arg3) {
8893                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8894                     return -TARGET_EFAULT;
8895                 old_act->_sa_handler = oact._sa_handler;
8896                 old_act->sa_flags = oact.sa_flags;
8897                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8898                 old_act->sa_mask.sig[1] = 0;
8899                 old_act->sa_mask.sig[2] = 0;
8900                 old_act->sa_mask.sig[3] = 0;
8901                 unlock_user_struct(old_act, arg3, 1);
8902             }
8903 #else
8904             struct target_old_sigaction *old_act;
8905             struct target_sigaction act, oact, *pact;
8906             if (arg2) {
8907                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8908                     return -TARGET_EFAULT;
8909                 act._sa_handler = old_act->_sa_handler;
8910                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8911                 act.sa_flags = old_act->sa_flags;
8912 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8913                 act.sa_restorer = old_act->sa_restorer;
8914 #endif
8915                 unlock_user_struct(old_act, arg2, 0);
8916                 pact = &act;
8917             } else {
8918                 pact = NULL;
8919             }
8920             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8921             if (!is_error(ret) && arg3) {
8922                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8923                     return -TARGET_EFAULT;
8924                 old_act->_sa_handler = oact._sa_handler;
8925                 old_act->sa_mask = oact.sa_mask.sig[0];
8926                 old_act->sa_flags = oact.sa_flags;
8927 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8928                 old_act->sa_restorer = oact.sa_restorer;
8929 #endif
8930                 unlock_user_struct(old_act, arg3, 1);
8931             }
8932 #endif
8933         }
8934         return ret;
8935 #endif
8936     case TARGET_NR_rt_sigaction:
8937         {
8938             /*
8939              * For Alpha and SPARC this is a 5 argument syscall, with
8940              * a 'restorer' parameter which must be copied into the
8941              * sa_restorer field of the sigaction struct.
8942              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8943              * and arg5 is the sigsetsize.
8944              */
8945 #if defined(TARGET_ALPHA)
8946             target_ulong sigsetsize = arg4;
8947             target_ulong restorer = arg5;
8948 #elif defined(TARGET_SPARC)
8949             target_ulong restorer = arg4;
8950             target_ulong sigsetsize = arg5;
8951 #else
8952             target_ulong sigsetsize = arg4;
8953             target_ulong restorer = 0;
8954 #endif
8955             struct target_sigaction *act = NULL;
8956             struct target_sigaction *oact = NULL;
8957 
8958             if (sigsetsize != sizeof(target_sigset_t)) {
8959                 return -TARGET_EINVAL;
8960             }
8961             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8962                 return -TARGET_EFAULT;
8963             }
8964             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8965                 ret = -TARGET_EFAULT;
8966             } else {
8967                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8968                 if (oact) {
8969                     unlock_user_struct(oact, arg3, 1);
8970                 }
8971             }
8972             if (act) {
8973                 unlock_user_struct(act, arg2, 0);
8974             }
8975         }
8976         return ret;
8977 #ifdef TARGET_NR_sgetmask /* not on alpha */
8978     case TARGET_NR_sgetmask:
8979         {
8980             sigset_t cur_set;
8981             abi_ulong target_set;
8982             ret = do_sigprocmask(0, NULL, &cur_set);
8983             if (!ret) {
8984                 host_to_target_old_sigset(&target_set, &cur_set);
8985                 ret = target_set;
8986             }
8987         }
8988         return ret;
8989 #endif
8990 #ifdef TARGET_NR_ssetmask /* not on alpha */
8991     case TARGET_NR_ssetmask:
8992         {
8993             sigset_t set, oset;
8994             abi_ulong target_set = arg1;
8995             target_to_host_old_sigset(&set, &target_set);
8996             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8997             if (!ret) {
8998                 host_to_target_old_sigset(&target_set, &oset);
8999                 ret = target_set;
9000             }
9001         }
9002         return ret;
9003 #endif
9004 #ifdef TARGET_NR_sigprocmask
9005     case TARGET_NR_sigprocmask:
9006         {
9007 #if defined(TARGET_ALPHA)
9008             sigset_t set, oldset;
9009             abi_ulong mask;
9010             int how;
9011 
9012             switch (arg1) {
9013             case TARGET_SIG_BLOCK:
9014                 how = SIG_BLOCK;
9015                 break;
9016             case TARGET_SIG_UNBLOCK:
9017                 how = SIG_UNBLOCK;
9018                 break;
9019             case TARGET_SIG_SETMASK:
9020                 how = SIG_SETMASK;
9021                 break;
9022             default:
9023                 return -TARGET_EINVAL;
9024             }
9025             mask = arg2;
9026             target_to_host_old_sigset(&set, &mask);
9027 
9028             ret = do_sigprocmask(how, &set, &oldset);
9029             if (!is_error(ret)) {
9030                 host_to_target_old_sigset(&mask, &oldset);
9031                 ret = mask;
9032                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9033             }
9034 #else
9035             sigset_t set, oldset, *set_ptr;
9036             int how;
9037 
9038             if (arg2) {
9039                 switch (arg1) {
9040                 case TARGET_SIG_BLOCK:
9041                     how = SIG_BLOCK;
9042                     break;
9043                 case TARGET_SIG_UNBLOCK:
9044                     how = SIG_UNBLOCK;
9045                     break;
9046                 case TARGET_SIG_SETMASK:
9047                     how = SIG_SETMASK;
9048                     break;
9049                 default:
9050                     return -TARGET_EINVAL;
9051                 }
9052                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9053                     return -TARGET_EFAULT;
9054                 target_to_host_old_sigset(&set, p);
9055                 unlock_user(p, arg2, 0);
9056                 set_ptr = &set;
9057             } else {
9058                 how = 0;
9059                 set_ptr = NULL;
9060             }
9061             ret = do_sigprocmask(how, set_ptr, &oldset);
9062             if (!is_error(ret) && arg3) {
9063                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9064                     return -TARGET_EFAULT;
9065                 host_to_target_old_sigset(p, &oldset);
9066                 unlock_user(p, arg3, sizeof(target_sigset_t));
9067             }
9068 #endif
9069         }
9070         return ret;
9071 #endif
9072     case TARGET_NR_rt_sigprocmask:
9073         {
9074             int how = arg1;
9075             sigset_t set, oldset, *set_ptr;
9076 
9077             if (arg4 != sizeof(target_sigset_t)) {
9078                 return -TARGET_EINVAL;
9079             }
9080 
9081             if (arg2) {
9082                 switch(how) {
9083                 case TARGET_SIG_BLOCK:
9084                     how = SIG_BLOCK;
9085                     break;
9086                 case TARGET_SIG_UNBLOCK:
9087                     how = SIG_UNBLOCK;
9088                     break;
9089                 case TARGET_SIG_SETMASK:
9090                     how = SIG_SETMASK;
9091                     break;
9092                 default:
9093                     return -TARGET_EINVAL;
9094                 }
9095                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9096                     return -TARGET_EFAULT;
9097                 target_to_host_sigset(&set, p);
9098                 unlock_user(p, arg2, 0);
9099                 set_ptr = &set;
9100             } else {
9101                 how = 0;
9102                 set_ptr = NULL;
9103             }
9104             ret = do_sigprocmask(how, set_ptr, &oldset);
9105             if (!is_error(ret) && arg3) {
9106                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9107                     return -TARGET_EFAULT;
9108                 host_to_target_sigset(p, &oldset);
9109                 unlock_user(p, arg3, sizeof(target_sigset_t));
9110             }
9111         }
9112         return ret;
9113 #ifdef TARGET_NR_sigpending
9114     case TARGET_NR_sigpending:
9115         {
9116             sigset_t set;
9117             ret = get_errno(sigpending(&set));
9118             if (!is_error(ret)) {
9119                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9120                     return -TARGET_EFAULT;
9121                 host_to_target_old_sigset(p, &set);
9122                 unlock_user(p, arg1, sizeof(target_sigset_t));
9123             }
9124         }
9125         return ret;
9126 #endif
9127     case TARGET_NR_rt_sigpending:
9128         {
9129             sigset_t set;
9130 
9131             /* Yes, this check is >, not != like most. We follow the kernel's
9132              * logic: it implements NR_sigpending through the same
9133              * code path, and in that case the old_sigset_t passed in
9134              * is smaller in size.
9135              */
9136             if (arg2 > sizeof(target_sigset_t)) {
9137                 return -TARGET_EINVAL;
9138             }
9139 
9140             ret = get_errno(sigpending(&set));
9141             if (!is_error(ret)) {
9142                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9143                     return -TARGET_EFAULT;
9144                 host_to_target_sigset(p, &set);
9145                 unlock_user(p, arg1, sizeof(target_sigset_t));
9146             }
9147         }
9148         return ret;
9149 #ifdef TARGET_NR_sigsuspend
9150     case TARGET_NR_sigsuspend:
9151         {
9152             TaskState *ts = cpu->opaque;
9153 #if defined(TARGET_ALPHA)
9154             abi_ulong mask = arg1;
9155             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9156 #else
9157             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9158                 return -TARGET_EFAULT;
9159             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9160             unlock_user(p, arg1, 0);
9161 #endif
9162             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9163                                                SIGSET_T_SIZE));
9164             if (ret != -TARGET_ERESTARTSYS) {
9165                 ts->in_sigsuspend = 1;
9166             }
9167         }
9168         return ret;
9169 #endif
9170     case TARGET_NR_rt_sigsuspend:
9171         {
9172             TaskState *ts = cpu->opaque;
9173 
9174             if (arg2 != sizeof(target_sigset_t)) {
9175                 return -TARGET_EINVAL;
9176             }
9177             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9178                 return -TARGET_EFAULT;
9179             target_to_host_sigset(&ts->sigsuspend_mask, p);
9180             unlock_user(p, arg1, 0);
9181             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9182                                                SIGSET_T_SIZE));
9183             if (ret != -TARGET_ERESTARTSYS) {
9184                 ts->in_sigsuspend = 1;
9185             }
9186         }
9187         return ret;
9188 #ifdef TARGET_NR_rt_sigtimedwait
9189     case TARGET_NR_rt_sigtimedwait:
9190         {
9191             sigset_t set;
9192             struct timespec uts, *puts;
9193             siginfo_t uinfo;
9194 
9195             if (arg4 != sizeof(target_sigset_t)) {
9196                 return -TARGET_EINVAL;
9197             }
9198 
9199             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9200                 return -TARGET_EFAULT;
9201             target_to_host_sigset(&set, p);
9202             unlock_user(p, arg1, 0);
9203             if (arg3) {
9204                 puts = &uts;
9205                 if (target_to_host_timespec(puts, arg3)) {
9206                     return -TARGET_EFAULT;
9207                 }
9208             } else {
9209                 puts = NULL;
9210             }
9211             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9212                                                  SIGSET_T_SIZE));
9213             if (!is_error(ret)) {
9214                 if (arg2) {
9215                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9216                                   0);
9217                     if (!p) {
9218                         return -TARGET_EFAULT;
9219                     }
9220                     host_to_target_siginfo(p, &uinfo);
9221                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9222                 }
9223                 ret = host_to_target_signal(ret);
9224             }
9225         }
9226         return ret;
9227 #endif
9228 #ifdef TARGET_NR_rt_sigtimedwait_time64
9229     case TARGET_NR_rt_sigtimedwait_time64:
9230         {
9231             sigset_t set;
9232             struct timespec uts, *puts;
9233             siginfo_t uinfo;
9234 
9235             if (arg4 != sizeof(target_sigset_t)) {
9236                 return -TARGET_EINVAL;
9237             }
9238 
9239             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9240             if (!p) {
9241                 return -TARGET_EFAULT;
9242             }
9243             target_to_host_sigset(&set, p);
9244             unlock_user(p, arg1, 0);
9245             if (arg3) {
9246                 puts = &uts;
9247                 if (target_to_host_timespec64(puts, arg3)) {
9248                     return -TARGET_EFAULT;
9249                 }
9250             } else {
9251                 puts = NULL;
9252             }
9253             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9254                                                  SIGSET_T_SIZE));
9255             if (!is_error(ret)) {
9256                 if (arg2) {
9257                     p = lock_user(VERIFY_WRITE, arg2,
9258                                   sizeof(target_siginfo_t), 0);
9259                     if (!p) {
9260                         return -TARGET_EFAULT;
9261                     }
9262                     host_to_target_siginfo(p, &uinfo);
9263                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9264                 }
9265                 ret = host_to_target_signal(ret);
9266             }
9267         }
9268         return ret;
9269 #endif
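    /*
     * For both rt_sigtimedwait variants above: on success the host
     * sigtimedwait() returns the number of the signal it consumed, and host
     * and guest signal numbering can differ, so the result is translated
     * back with host_to_target_signal() before being handed to the guest.
     * As an illustration, a guest doing
     *
     *     sigset_t set;
     *     sigemptyset(&set);
     *     sigaddset(&set, SIGUSR1);
     *     int sig = sigtimedwait(&set, NULL, NULL);
     *
     * sees its own SIGUSR1 value even on hosts that number that signal
     * differently.
     */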
9270     case TARGET_NR_rt_sigqueueinfo:
9271         {
9272             siginfo_t uinfo;
9273 
9274             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9275             if (!p) {
9276                 return -TARGET_EFAULT;
9277             }
9278             target_to_host_siginfo(&uinfo, p);
9279             unlock_user(p, arg3, 0);
9280             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9281         }
9282         return ret;
9283     case TARGET_NR_rt_tgsigqueueinfo:
9284         {
9285             siginfo_t uinfo;
9286 
9287             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9288             if (!p) {
9289                 return -TARGET_EFAULT;
9290             }
9291             target_to_host_siginfo(&uinfo, p);
9292             unlock_user(p, arg4, 0);
9293             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9294         }
9295         return ret;
9296 #ifdef TARGET_NR_sigreturn
9297     case TARGET_NR_sigreturn:
9298         if (block_signals()) {
9299             return -TARGET_ERESTARTSYS;
9300         }
9301         return do_sigreturn(cpu_env);
9302 #endif
9303     case TARGET_NR_rt_sigreturn:
9304         if (block_signals()) {
9305             return -TARGET_ERESTARTSYS;
9306         }
9307         return do_rt_sigreturn(cpu_env);
9308     case TARGET_NR_sethostname:
9309         if (!(p = lock_user_string(arg1)))
9310             return -TARGET_EFAULT;
9311         ret = get_errno(sethostname(p, arg2));
9312         unlock_user(p, arg1, 0);
9313         return ret;
9314 #ifdef TARGET_NR_setrlimit
9315     case TARGET_NR_setrlimit:
9316         {
9317             int resource = target_to_host_resource(arg1);
9318             struct target_rlimit *target_rlim;
9319             struct rlimit rlim;
9320             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9321                 return -TARGET_EFAULT;
9322             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9323             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9324             unlock_user_struct(target_rlim, arg2, 0);
9325             /*
9326              * If we just passed through resource limit settings for memory then
9327              * they would also apply to QEMU's own allocations, and QEMU will
9328              * crash or hang or die if its allocations fail. Ideally we would
9329              * track the guest allocations in QEMU and apply the limits ourselves.
9330              * For now, just tell the guest the call succeeded but don't actually
9331              * limit anything.
9332              */
9333             if (resource != RLIMIT_AS &&
9334                 resource != RLIMIT_DATA &&
9335                 resource != RLIMIT_STACK) {
9336                 return get_errno(setrlimit(resource, &rlim));
9337             } else {
9338                 return 0;
9339             }
9340         }
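            /*
             * Illustration of the effect described above: a guest running
             *
             *     struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
             *     setrlimit(RLIMIT_AS, &rl);
             *
             * gets back 0, but no allocation limit is actually enforced, and
             * a later getrlimit(RLIMIT_AS, ...) (handled below) still reports
             * the host's current limit.
             */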
9341 #endif
9342 #ifdef TARGET_NR_getrlimit
9343     case TARGET_NR_getrlimit:
9344         {
9345             int resource = target_to_host_resource(arg1);
9346             struct target_rlimit *target_rlim;
9347             struct rlimit rlim;
9348 
9349             ret = get_errno(getrlimit(resource, &rlim));
9350             if (!is_error(ret)) {
9351                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9352                     return -TARGET_EFAULT;
9353                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9354                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9355                 unlock_user_struct(target_rlim, arg2, 1);
9356             }
9357         }
9358         return ret;
9359 #endif
9360     case TARGET_NR_getrusage:
9361         {
9362             struct rusage rusage;
9363             ret = get_errno(getrusage(arg1, &rusage));
9364             if (!is_error(ret)) {
9365                 ret = host_to_target_rusage(arg2, &rusage);
9366             }
9367         }
9368         return ret;
9369 #if defined(TARGET_NR_gettimeofday)
9370     case TARGET_NR_gettimeofday:
9371         {
9372             struct timeval tv;
9373             struct timezone tz;
9374 
9375             ret = get_errno(gettimeofday(&tv, &tz));
9376             if (!is_error(ret)) {
9377                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9378                     return -TARGET_EFAULT;
9379                 }
9380                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9381                     return -TARGET_EFAULT;
9382                 }
9383             }
9384         }
9385         return ret;
9386 #endif
9387 #if defined(TARGET_NR_settimeofday)
9388     case TARGET_NR_settimeofday:
9389         {
9390             struct timeval tv, *ptv = NULL;
9391             struct timezone tz, *ptz = NULL;
9392 
9393             if (arg1) {
9394                 if (copy_from_user_timeval(&tv, arg1)) {
9395                     return -TARGET_EFAULT;
9396                 }
9397                 ptv = &tv;
9398             }
9399 
9400             if (arg2) {
9401                 if (copy_from_user_timezone(&tz, arg2)) {
9402                     return -TARGET_EFAULT;
9403                 }
9404                 ptz = &tz;
9405             }
9406 
9407             return get_errno(settimeofday(ptv, ptz));
9408         }
9409 #endif
9410 #if defined(TARGET_NR_select)
9411     case TARGET_NR_select:
9412 #if defined(TARGET_WANT_NI_OLD_SELECT)
9413         /* some architectures used to have old_select here
9414          * but now return ENOSYS for it.
9415          */
9416         ret = -TARGET_ENOSYS;
9417 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9418         ret = do_old_select(arg1);
9419 #else
9420         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9421 #endif
9422         return ret;
9423 #endif
9424 #ifdef TARGET_NR_pselect6
9425     case TARGET_NR_pselect6:
9426         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9427 #endif
9428 #ifdef TARGET_NR_pselect6_time64
9429     case TARGET_NR_pselect6_time64:
9430         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9431 #endif
9432 #ifdef TARGET_NR_symlink
9433     case TARGET_NR_symlink:
9434         {
9435             void *p2;
9436             p = lock_user_string(arg1);
9437             p2 = lock_user_string(arg2);
9438             if (!p || !p2)
9439                 ret = -TARGET_EFAULT;
9440             else
9441                 ret = get_errno(symlink(p, p2));
9442             unlock_user(p2, arg2, 0);
9443             unlock_user(p, arg1, 0);
9444         }
9445         return ret;
9446 #endif
9447 #if defined(TARGET_NR_symlinkat)
9448     case TARGET_NR_symlinkat:
9449         {
9450             void *p2;
9451             p  = lock_user_string(arg1);
9452             p2 = lock_user_string(arg3);
9453             if (!p || !p2)
9454                 ret = -TARGET_EFAULT;
9455             else
9456                 ret = get_errno(symlinkat(p, arg2, p2));
9457             unlock_user(p2, arg3, 0);
9458             unlock_user(p, arg1, 0);
9459         }
9460         return ret;
9461 #endif
9462 #ifdef TARGET_NR_readlink
9463     case TARGET_NR_readlink:
9464         {
9465             void *p2;
9466             p = lock_user_string(arg1);
9467             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9468             if (!p || !p2) {
9469                 ret = -TARGET_EFAULT;
9470             } else if (!arg3) {
9471                 /* Short circuit this for the magic exe check. */
9472                 ret = -TARGET_EINVAL;
9473             } else if (is_proc_myself((const char *)p, "exe")) {
9474                 char real[PATH_MAX], *temp;
9475                 temp = realpath(exec_path, real);
9476                 /* Return value is # of bytes that we wrote to the buffer. */
9477                 if (temp == NULL) {
9478                     ret = get_errno(-1);
9479                 } else {
9480                     /* Don't worry about sign mismatch as earlier mapping
9481                      * logic would have thrown a bad address error. */
9482                     ret = MIN(strlen(real), arg3);
9483                     /* We cannot NUL terminate the string. */
9484                     memcpy(p2, real, ret);
9485                 }
9486             } else {
9487                 ret = get_errno(readlink(path(p), p2, arg3));
9488             }
9489             unlock_user(p2, arg2, ret);
9490             unlock_user(p, arg1, 0);
9491         }
9492         return ret;
9493 #endif
9494 #if defined(TARGET_NR_readlinkat)
9495     case TARGET_NR_readlinkat:
9496         {
9497             void *p2;
9498             p  = lock_user_string(arg2);
9499             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9500             if (!p || !p2) {
9501                 ret = -TARGET_EFAULT;
9502             } else if (is_proc_myself((const char *)p, "exe")) {
9503                 char real[PATH_MAX], *temp;
9504                 temp = realpath(exec_path, real);
9505             ret = temp == NULL ? get_errno(-1) : strlen(real);
9506                 snprintf((char *)p2, arg4, "%s", real);
9507             } else {
9508                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9509             }
9510             unlock_user(p2, arg3, ret);
9511             unlock_user(p, arg2, 0);
9512         }
9513         return ret;
9514 #endif
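    /*
     * Illustration of the is_proc_myself() special case in the readlink and
     * readlinkat handling above: a guest that does
     *
     *     char buf[PATH_MAX];
     *     ssize_t n = readlink("/proc/self/exe", buf, sizeof(buf));
     *
     * receives the resolved path of the *emulated* binary (exec_path) rather
     * than the path of the QEMU executable itself.  In the readlink case at
     * most arg3 bytes are copied and, as with the real syscall, the result is
     * not NUL-terminated.
     */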
9515 #ifdef TARGET_NR_swapon
9516     case TARGET_NR_swapon:
9517         if (!(p = lock_user_string(arg1)))
9518             return -TARGET_EFAULT;
9519         ret = get_errno(swapon(p, arg2));
9520         unlock_user(p, arg1, 0);
9521         return ret;
9522 #endif
9523     case TARGET_NR_reboot:
9524         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9525            /* arg4 (the command string) is only used here; it must be ignored in all other cases */
9526            p = lock_user_string(arg4);
9527            if (!p) {
9528                return -TARGET_EFAULT;
9529            }
9530            ret = get_errno(reboot(arg1, arg2, arg3, p));
9531            unlock_user(p, arg4, 0);
9532         } else {
9533            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9534         }
9535         return ret;
9536 #ifdef TARGET_NR_mmap
9537     case TARGET_NR_mmap:
9538 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9539     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9540     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9541     || defined(TARGET_S390X)
9542         {
9543             abi_ulong *v;
9544             abi_ulong v1, v2, v3, v4, v5, v6;
9545             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9546                 return -TARGET_EFAULT;
9547             v1 = tswapal(v[0]);
9548             v2 = tswapal(v[1]);
9549             v3 = tswapal(v[2]);
9550             v4 = tswapal(v[3]);
9551             v5 = tswapal(v[4]);
9552             v6 = tswapal(v[5]);
9553             unlock_user(v, arg1, 0);
9554             ret = get_errno(target_mmap(v1, v2, v3,
9555                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9556                                         v5, v6));
9557         }
9558 #else
9559         /* mmap pointers are always untagged */
9560         ret = get_errno(target_mmap(arg1, arg2, arg3,
9561                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9562                                     arg5,
9563                                     arg6));
9564 #endif
9565         return ret;
9566 #endif
9567 #ifdef TARGET_NR_mmap2
9568     case TARGET_NR_mmap2:
9569 #ifndef MMAP_SHIFT
9570 #define MMAP_SHIFT 12
9571 #endif
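        /*
         * The last argument of mmap2 is the file offset in units of
         * (1 << MMAP_SHIFT) bytes -- 4096-byte pages unless the target
         * overrides MMAP_SHIFT -- so, for example, arg6 == 3 means a byte
         * offset of 3 * 4096 = 12288.
         */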
9572         ret = target_mmap(arg1, arg2, arg3,
9573                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9574                           arg5, arg6 << MMAP_SHIFT);
9575         return get_errno(ret);
9576 #endif
9577     case TARGET_NR_munmap:
9578         arg1 = cpu_untagged_addr(cpu, arg1);
9579         return get_errno(target_munmap(arg1, arg2));
9580     case TARGET_NR_mprotect:
9581         arg1 = cpu_untagged_addr(cpu, arg1);
9582         {
9583             TaskState *ts = cpu->opaque;
9584             /* Special hack to detect libc making the stack executable.  */
9585             if ((arg3 & PROT_GROWSDOWN)
9586                 && arg1 >= ts->info->stack_limit
9587                 && arg1 <= ts->info->start_stack) {
9588                 arg3 &= ~PROT_GROWSDOWN;
9589                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9590                 arg1 = ts->info->stack_limit;
9591             }
9592         }
9593         return get_errno(target_mprotect(arg1, arg2, arg3));
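    /*
     * About the stack hack above: mprotect(PROT_GROWSDOWN) asks the kernel to
     * apply the new protection all the way down to the start of the
     * grows-down stack mapping.  Since target_mprotect() has no notion of
     * grows-down regions, the range is widened by hand from
     * ts->info->stack_limit up to the requested address before the flag is
     * stripped.
     */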
9594 #ifdef TARGET_NR_mremap
9595     case TARGET_NR_mremap:
9596         arg1 = cpu_untagged_addr(cpu, arg1);
9597         /* mremap new_addr (arg5) is always untagged */
9598         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9599 #endif
9600         /* ??? msync/mlock/munlock are broken for softmmu.  */
9601 #ifdef TARGET_NR_msync
9602     case TARGET_NR_msync:
9603         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9604 #endif
9605 #ifdef TARGET_NR_mlock
9606     case TARGET_NR_mlock:
9607         return get_errno(mlock(g2h(cpu, arg1), arg2));
9608 #endif
9609 #ifdef TARGET_NR_munlock
9610     case TARGET_NR_munlock:
9611         return get_errno(munlock(g2h(cpu, arg1), arg2));
9612 #endif
9613 #ifdef TARGET_NR_mlockall
9614     case TARGET_NR_mlockall:
9615         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9616 #endif
9617 #ifdef TARGET_NR_munlockall
9618     case TARGET_NR_munlockall:
9619         return get_errno(munlockall());
9620 #endif
9621 #ifdef TARGET_NR_truncate
9622     case TARGET_NR_truncate:
9623         if (!(p = lock_user_string(arg1)))
9624             return -TARGET_EFAULT;
9625         ret = get_errno(truncate(p, arg2));
9626         unlock_user(p, arg1, 0);
9627         return ret;
9628 #endif
9629 #ifdef TARGET_NR_ftruncate
9630     case TARGET_NR_ftruncate:
9631         return get_errno(ftruncate(arg1, arg2));
9632 #endif
9633     case TARGET_NR_fchmod:
9634         return get_errno(fchmod(arg1, arg2));
9635 #if defined(TARGET_NR_fchmodat)
9636     case TARGET_NR_fchmodat:
9637         if (!(p = lock_user_string(arg2)))
9638             return -TARGET_EFAULT;
9639         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9640         unlock_user(p, arg2, 0);
9641         return ret;
9642 #endif
9643     case TARGET_NR_getpriority:
9644         /* Note that negative values are valid for getpriority, so we must
9645            differentiate based on errno settings.  */
9646         errno = 0;
9647         ret = getpriority(arg1, arg2);
9648         if (ret == -1 && errno != 0) {
9649             return -host_to_target_errno(errno);
9650         }
9651 #ifdef TARGET_ALPHA
9652         /* Return value is the unbiased priority.  Signal no error.  */
9653         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9654 #else
9655         /* Return value is a biased priority to avoid negative numbers.  */
9656         ret = 20 - ret;
9657 #endif
9658         return ret;
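    /*
     * Worked example of the bias above (non-Alpha targets): for a process
     * whose nice value is -5, the host getpriority() returns -5 and the
     * guest is handed 20 - (-5) = 25.  The guest's libc then undoes the bias
     * (20 - 25 = -5), so user code still sees the usual -20..19 range; this
     * matches what the raw Linux syscall does natively.
     */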
9659     case TARGET_NR_setpriority:
9660         return get_errno(setpriority(arg1, arg2, arg3));
9661 #ifdef TARGET_NR_statfs
9662     case TARGET_NR_statfs:
9663         if (!(p = lock_user_string(arg1))) {
9664             return -TARGET_EFAULT;
9665         }
9666         ret = get_errno(statfs(path(p), &stfs));
9667         unlock_user(p, arg1, 0);
9668     convert_statfs:
9669         if (!is_error(ret)) {
9670             struct target_statfs *target_stfs;
9671 
9672             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9673                 return -TARGET_EFAULT;
9674             __put_user(stfs.f_type, &target_stfs->f_type);
9675             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9676             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9677             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9678             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9679             __put_user(stfs.f_files, &target_stfs->f_files);
9680             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9681             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9682             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9683             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9684             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9685 #ifdef _STATFS_F_FLAGS
9686             __put_user(stfs.f_flags, &target_stfs->f_flags);
9687 #else
9688             __put_user(0, &target_stfs->f_flags);
9689 #endif
9690             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9691             unlock_user_struct(target_stfs, arg2, 1);
9692         }
9693         return ret;
9694 #endif
9695 #ifdef TARGET_NR_fstatfs
9696     case TARGET_NR_fstatfs:
9697         ret = get_errno(fstatfs(arg1, &stfs));
9698         goto convert_statfs;
9699 #endif
9700 #ifdef TARGET_NR_statfs64
9701     case TARGET_NR_statfs64:
9702         if (!(p = lock_user_string(arg1))) {
9703             return -TARGET_EFAULT;
9704         }
9705         ret = get_errno(statfs(path(p), &stfs));
9706         unlock_user(p, arg1, 0);
9707     convert_statfs64:
9708         if (!is_error(ret)) {
9709             struct target_statfs64 *target_stfs;
9710 
9711             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9712                 return -TARGET_EFAULT;
9713             __put_user(stfs.f_type, &target_stfs->f_type);
9714             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9715             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9716             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9717             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9718             __put_user(stfs.f_files, &target_stfs->f_files);
9719             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9720             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9721             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9722             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9723             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9724 #ifdef _STATFS_F_FLAGS
9725             __put_user(stfs.f_flags, &target_stfs->f_flags);
9726 #else
9727             __put_user(0, &target_stfs->f_flags);
9728 #endif
9729             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9730             unlock_user_struct(target_stfs, arg3, 1);
9731         }
9732         return ret;
9733     case TARGET_NR_fstatfs64:
9734         ret = get_errno(fstatfs(arg1, &stfs));
9735         goto convert_statfs64;
9736 #endif
9737 #ifdef TARGET_NR_socketcall
9738     case TARGET_NR_socketcall:
9739         return do_socketcall(arg1, arg2);
9740 #endif
9741 #ifdef TARGET_NR_accept
9742     case TARGET_NR_accept:
9743         return do_accept4(arg1, arg2, arg3, 0);
9744 #endif
9745 #ifdef TARGET_NR_accept4
9746     case TARGET_NR_accept4:
9747         return do_accept4(arg1, arg2, arg3, arg4);
9748 #endif
9749 #ifdef TARGET_NR_bind
9750     case TARGET_NR_bind:
9751         return do_bind(arg1, arg2, arg3);
9752 #endif
9753 #ifdef TARGET_NR_connect
9754     case TARGET_NR_connect:
9755         return do_connect(arg1, arg2, arg3);
9756 #endif
9757 #ifdef TARGET_NR_getpeername
9758     case TARGET_NR_getpeername:
9759         return do_getpeername(arg1, arg2, arg3);
9760 #endif
9761 #ifdef TARGET_NR_getsockname
9762     case TARGET_NR_getsockname:
9763         return do_getsockname(arg1, arg2, arg3);
9764 #endif
9765 #ifdef TARGET_NR_getsockopt
9766     case TARGET_NR_getsockopt:
9767         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9768 #endif
9769 #ifdef TARGET_NR_listen
9770     case TARGET_NR_listen:
9771         return get_errno(listen(arg1, arg2));
9772 #endif
9773 #ifdef TARGET_NR_recv
9774     case TARGET_NR_recv:
9775         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9776 #endif
9777 #ifdef TARGET_NR_recvfrom
9778     case TARGET_NR_recvfrom:
9779         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9780 #endif
9781 #ifdef TARGET_NR_recvmsg
9782     case TARGET_NR_recvmsg:
9783         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9784 #endif
9785 #ifdef TARGET_NR_send
9786     case TARGET_NR_send:
9787         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9788 #endif
9789 #ifdef TARGET_NR_sendmsg
9790     case TARGET_NR_sendmsg:
9791         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9792 #endif
9793 #ifdef TARGET_NR_sendmmsg
9794     case TARGET_NR_sendmmsg:
9795         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9796 #endif
9797 #ifdef TARGET_NR_recvmmsg
9798     case TARGET_NR_recvmmsg:
9799         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9800 #endif
9801 #ifdef TARGET_NR_sendto
9802     case TARGET_NR_sendto:
9803         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9804 #endif
9805 #ifdef TARGET_NR_shutdown
9806     case TARGET_NR_shutdown:
9807         return get_errno(shutdown(arg1, arg2));
9808 #endif
9809 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9810     case TARGET_NR_getrandom:
9811         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9812         if (!p) {
9813             return -TARGET_EFAULT;
9814         }
9815         ret = get_errno(getrandom(p, arg2, arg3));
9816         unlock_user(p, arg1, ret);
9817         return ret;
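        /*
         * On success the host getrandom() returns how many bytes it stored,
         * and that count is what is passed to unlock_user() above, so the
         * unlock covers exactly the bytes that were produced.
         */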
9818 #endif
9819 #ifdef TARGET_NR_socket
9820     case TARGET_NR_socket:
9821         return do_socket(arg1, arg2, arg3);
9822 #endif
9823 #ifdef TARGET_NR_socketpair
9824     case TARGET_NR_socketpair:
9825         return do_socketpair(arg1, arg2, arg3, arg4);
9826 #endif
9827 #ifdef TARGET_NR_setsockopt
9828     case TARGET_NR_setsockopt:
9829         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9830 #endif
9831 #if defined(TARGET_NR_syslog)
9832     case TARGET_NR_syslog:
9833         {
9834             int len = arg2;
9835 
9836             switch (arg1) {
9837             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9838             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9839             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9840             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9841             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9842             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9843             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9844             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9845                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9846             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9847             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9848             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9849                 {
9850                     if (len < 0) {
9851                         return -TARGET_EINVAL;
9852                     }
9853                     if (len == 0) {
9854                         return 0;
9855                     }
9856                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9857                     if (!p) {
9858                         return -TARGET_EFAULT;
9859                     }
9860                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9861                     unlock_user(p, arg2, arg3);
9862                 }
9863                 return ret;
9864             default:
9865                 return -TARGET_EINVAL;
9866             }
9867         }
9868         break;
9869 #endif
9870     case TARGET_NR_setitimer:
9871         {
9872             struct itimerval value, ovalue, *pvalue;
9873 
9874             if (arg2) {
9875                 pvalue = &value;
9876                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9877                     || copy_from_user_timeval(&pvalue->it_value,
9878                                               arg2 + sizeof(struct target_timeval)))
9879                     return -TARGET_EFAULT;
9880             } else {
9881                 pvalue = NULL;
9882             }
9883             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9884             if (!is_error(ret) && arg3) {
9885                 if (copy_to_user_timeval(arg3,
9886                                          &ovalue.it_interval)
9887                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9888                                             &ovalue.it_value))
9889                     return -TARGET_EFAULT;
9890             }
9891         }
9892         return ret;
9893     case TARGET_NR_getitimer:
9894         {
9895             struct itimerval value;
9896 
9897             ret = get_errno(getitimer(arg1, &value));
9898             if (!is_error(ret) && arg2) {
9899                 if (copy_to_user_timeval(arg2,
9900                                          &value.it_interval)
9901                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9902                                             &value.it_value))
9903                     return -TARGET_EFAULT;
9904             }
9905         }
9906         return ret;
9907 #ifdef TARGET_NR_stat
9908     case TARGET_NR_stat:
9909         if (!(p = lock_user_string(arg1))) {
9910             return -TARGET_EFAULT;
9911         }
9912         ret = get_errno(stat(path(p), &st));
9913         unlock_user(p, arg1, 0);
9914         goto do_stat;
9915 #endif
9916 #ifdef TARGET_NR_lstat
9917     case TARGET_NR_lstat:
9918         if (!(p = lock_user_string(arg1))) {
9919             return -TARGET_EFAULT;
9920         }
9921         ret = get_errno(lstat(path(p), &st));
9922         unlock_user(p, arg1, 0);
9923         goto do_stat;
9924 #endif
9925 #ifdef TARGET_NR_fstat
9926     case TARGET_NR_fstat:
9927         {
9928             ret = get_errno(fstat(arg1, &st));
9929 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9930         do_stat:
9931 #endif
9932             if (!is_error(ret)) {
9933                 struct target_stat *target_st;
9934 
9935                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9936                     return -TARGET_EFAULT;
9937                 memset(target_st, 0, sizeof(*target_st));
9938                 __put_user(st.st_dev, &target_st->st_dev);
9939                 __put_user(st.st_ino, &target_st->st_ino);
9940                 __put_user(st.st_mode, &target_st->st_mode);
9941                 __put_user(st.st_uid, &target_st->st_uid);
9942                 __put_user(st.st_gid, &target_st->st_gid);
9943                 __put_user(st.st_nlink, &target_st->st_nlink);
9944                 __put_user(st.st_rdev, &target_st->st_rdev);
9945                 __put_user(st.st_size, &target_st->st_size);
9946                 __put_user(st.st_blksize, &target_st->st_blksize);
9947                 __put_user(st.st_blocks, &target_st->st_blocks);
9948                 __put_user(st.st_atime, &target_st->target_st_atime);
9949                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9950                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9951 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9952                 __put_user(st.st_atim.tv_nsec,
9953                            &target_st->target_st_atime_nsec);
9954                 __put_user(st.st_mtim.tv_nsec,
9955                            &target_st->target_st_mtime_nsec);
9956                 __put_user(st.st_ctim.tv_nsec,
9957                            &target_st->target_st_ctime_nsec);
9958 #endif
9959                 unlock_user_struct(target_st, arg2, 1);
9960             }
9961         }
9962         return ret;
9963 #endif
9964     case TARGET_NR_vhangup:
9965         return get_errno(vhangup());
9966 #ifdef TARGET_NR_syscall
9967     case TARGET_NR_syscall:
9968         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9969                           arg6, arg7, arg8, 0);
9970 #endif
9971 #if defined(TARGET_NR_wait4)
9972     case TARGET_NR_wait4:
9973         {
9974             int status;
9975             abi_long status_ptr = arg2;
9976             struct rusage rusage, *rusage_ptr;
9977             abi_ulong target_rusage = arg4;
9978             abi_long rusage_err;
9979             if (target_rusage)
9980                 rusage_ptr = &rusage;
9981             else
9982                 rusage_ptr = NULL;
9983             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9984             if (!is_error(ret)) {
9985                 if (status_ptr && ret) {
9986                     status = host_to_target_waitstatus(status);
9987                     if (put_user_s32(status, status_ptr))
9988                         return -TARGET_EFAULT;
9989                 }
9990                 if (target_rusage) {
9991                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9992                     if (rusage_err) {
9993                         ret = rusage_err;
9994                     }
9995                 }
9996             }
9997         }
9998         return ret;
9999 #endif
10000 #ifdef TARGET_NR_swapoff
10001     case TARGET_NR_swapoff:
10002         if (!(p = lock_user_string(arg1)))
10003             return -TARGET_EFAULT;
10004         ret = get_errno(swapoff(p));
10005         unlock_user(p, arg1, 0);
10006         return ret;
10007 #endif
10008     case TARGET_NR_sysinfo:
10009         {
10010             struct target_sysinfo *target_value;
10011             struct sysinfo value;
10012             ret = get_errno(sysinfo(&value));
10013             if (!is_error(ret) && arg1)
10014             {
10015                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10016                     return -TARGET_EFAULT;
10017                 __put_user(value.uptime, &target_value->uptime);
10018                 __put_user(value.loads[0], &target_value->loads[0]);
10019                 __put_user(value.loads[1], &target_value->loads[1]);
10020                 __put_user(value.loads[2], &target_value->loads[2]);
10021                 __put_user(value.totalram, &target_value->totalram);
10022                 __put_user(value.freeram, &target_value->freeram);
10023                 __put_user(value.sharedram, &target_value->sharedram);
10024                 __put_user(value.bufferram, &target_value->bufferram);
10025                 __put_user(value.totalswap, &target_value->totalswap);
10026                 __put_user(value.freeswap, &target_value->freeswap);
10027                 __put_user(value.procs, &target_value->procs);
10028                 __put_user(value.totalhigh, &target_value->totalhigh);
10029                 __put_user(value.freehigh, &target_value->freehigh);
10030                 __put_user(value.mem_unit, &target_value->mem_unit);
10031                 unlock_user_struct(target_value, arg1, 1);
10032             }
10033         }
10034         return ret;
10035 #ifdef TARGET_NR_ipc
10036     case TARGET_NR_ipc:
10037         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10038 #endif
10039 #ifdef TARGET_NR_semget
10040     case TARGET_NR_semget:
10041         return get_errno(semget(arg1, arg2, arg3));
10042 #endif
10043 #ifdef TARGET_NR_semop
10044     case TARGET_NR_semop:
10045         return do_semtimedop(arg1, arg2, arg3, 0, false);
10046 #endif
10047 #ifdef TARGET_NR_semtimedop
10048     case TARGET_NR_semtimedop:
10049         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10050 #endif
10051 #ifdef TARGET_NR_semtimedop_time64
10052     case TARGET_NR_semtimedop_time64:
10053         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10054 #endif
10055 #ifdef TARGET_NR_semctl
10056     case TARGET_NR_semctl:
10057         return do_semctl(arg1, arg2, arg3, arg4);
10058 #endif
10059 #ifdef TARGET_NR_msgctl
10060     case TARGET_NR_msgctl:
10061         return do_msgctl(arg1, arg2, arg3);
10062 #endif
10063 #ifdef TARGET_NR_msgget
10064     case TARGET_NR_msgget:
10065         return get_errno(msgget(arg1, arg2));
10066 #endif
10067 #ifdef TARGET_NR_msgrcv
10068     case TARGET_NR_msgrcv:
10069         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10070 #endif
10071 #ifdef TARGET_NR_msgsnd
10072     case TARGET_NR_msgsnd:
10073         return do_msgsnd(arg1, arg2, arg3, arg4);
10074 #endif
10075 #ifdef TARGET_NR_shmget
10076     case TARGET_NR_shmget:
10077         return get_errno(shmget(arg1, arg2, arg3));
10078 #endif
10079 #ifdef TARGET_NR_shmctl
10080     case TARGET_NR_shmctl:
10081         return do_shmctl(arg1, arg2, arg3);
10082 #endif
10083 #ifdef TARGET_NR_shmat
10084     case TARGET_NR_shmat:
10085         return do_shmat(cpu_env, arg1, arg2, arg3);
10086 #endif
10087 #ifdef TARGET_NR_shmdt
10088     case TARGET_NR_shmdt:
10089         return do_shmdt(arg1);
10090 #endif
10091     case TARGET_NR_fsync:
10092         return get_errno(fsync(arg1));
10093     case TARGET_NR_clone:
10094         /* Linux manages to have three different orderings for its
10095          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10096          * match the kernel's CONFIG_CLONE_* settings.
10097          * Microblaze is further special in that it uses a sixth
10098          * implicit argument to clone for the TLS pointer.
10099          */
10100 #if defined(TARGET_MICROBLAZE)
10101         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10102 #elif defined(TARGET_CLONE_BACKWARDS)
10103         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10104 #elif defined(TARGET_CLONE_BACKWARDS2)
10105         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10106 #else
10107         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10108 #endif
10109         return ret;
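    /*
     * Concretely: the same five values -- clone flags, new stack pointer,
     * parent-tid pointer, child-tid pointer and TLS pointer -- arrive in a
     * target-dependent order (Microblaze additionally passes the TLS pointer
     * in arg6, as noted above), and the #if ladder only reorders them into
     * do_fork()'s fixed parameter order.
     */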
10110 #ifdef __NR_exit_group
10111         /* new thread calls */
10112     case TARGET_NR_exit_group:
10113         preexit_cleanup(cpu_env, arg1);
10114         return get_errno(exit_group(arg1));
10115 #endif
10116     case TARGET_NR_setdomainname:
10117         if (!(p = lock_user_string(arg1)))
10118             return -TARGET_EFAULT;
10119         ret = get_errno(setdomainname(p, arg2));
10120         unlock_user(p, arg1, 0);
10121         return ret;
10122     case TARGET_NR_uname:
10123         /* no need to transcode because we use the linux syscall */
10124         {
10125             struct new_utsname * buf;
10126 
10127             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10128                 return -TARGET_EFAULT;
10129             ret = get_errno(sys_uname(buf));
10130             if (!is_error(ret)) {
10131                 /* Overwrite the native machine name with whatever is being
10132                    emulated. */
10133                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10134                           sizeof(buf->machine));
10135                 /* Allow the user to override the reported release.  */
10136                 if (qemu_uname_release && *qemu_uname_release) {
10137                     g_strlcpy(buf->release, qemu_uname_release,
10138                               sizeof(buf->release));
10139                 }
10140             }
10141             unlock_user_struct(buf, arg1, 1);
10142         }
10143         return ret;
10144 #ifdef TARGET_I386
10145     case TARGET_NR_modify_ldt:
10146         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10147 #if !defined(TARGET_X86_64)
10148     case TARGET_NR_vm86:
10149         return do_vm86(cpu_env, arg1, arg2);
10150 #endif
10151 #endif
10152 #if defined(TARGET_NR_adjtimex)
10153     case TARGET_NR_adjtimex:
10154         {
10155             struct timex host_buf;
10156 
10157             if (target_to_host_timex(&host_buf, arg1) != 0) {
10158                 return -TARGET_EFAULT;
10159             }
10160             ret = get_errno(adjtimex(&host_buf));
10161             if (!is_error(ret)) {
10162                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10163                     return -TARGET_EFAULT;
10164                 }
10165             }
10166         }
10167         return ret;
10168 #endif
10169 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10170     case TARGET_NR_clock_adjtime:
10171         {
10172             struct timex htx, *phtx = &htx;
10173 
10174             if (target_to_host_timex(phtx, arg2) != 0) {
10175                 return -TARGET_EFAULT;
10176             }
10177             ret = get_errno(clock_adjtime(arg1, phtx));
10178             if (!is_error(ret) && phtx) {
10179                 if (host_to_target_timex(arg2, phtx) != 0) {
10180                     return -TARGET_EFAULT;
10181                 }
10182             }
10183         }
10184         return ret;
10185 #endif
10186 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10187     case TARGET_NR_clock_adjtime64:
10188         {
10189             struct timex htx;
10190 
10191             if (target_to_host_timex64(&htx, arg2) != 0) {
10192                 return -TARGET_EFAULT;
10193             }
10194             ret = get_errno(clock_adjtime(arg1, &htx));
10195             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10196                     return -TARGET_EFAULT;
10197             }
10198         }
10199         return ret;
10200 #endif
10201     case TARGET_NR_getpgid:
10202         return get_errno(getpgid(arg1));
10203     case TARGET_NR_fchdir:
10204         return get_errno(fchdir(arg1));
10205     case TARGET_NR_personality:
10206         return get_errno(personality(arg1));
10207 #ifdef TARGET_NR__llseek /* Not on alpha */
10208     case TARGET_NR__llseek:
10209         {
10210             int64_t res;
10211 #if !defined(__NR_llseek)
10212             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10213             if (res == -1) {
10214                 ret = get_errno(res);
10215             } else {
10216                 ret = 0;
10217             }
10218 #else
10219             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10220 #endif
10221             if ((ret == 0) && put_user_s64(res, arg4)) {
10222                 return -TARGET_EFAULT;
10223             }
10224         }
10225         return ret;
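    /*
     * In the _llseek case above, arg2 and arg3 are the high and low 32 bits
     * of the 64-bit offset -- e.g. arg2 == 1, arg3 == 0 seeks to byte
     * 1 * 2^32 = 4 GiB -- and the combined result is written back through
     * the loff_t pointer in arg4, with arg5 being the whence value.
     */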
10226 #endif
10227 #ifdef TARGET_NR_getdents
10228     case TARGET_NR_getdents:
10229 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10230 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
10231         {
10232             struct target_dirent *target_dirp;
10233             struct linux_dirent *dirp;
10234             abi_long count = arg3;
10235 
10236             dirp = g_try_malloc(count);
10237             if (!dirp) {
10238                 return -TARGET_ENOMEM;
10239             }
10240 
10241             ret = get_errno(sys_getdents(arg1, dirp, count));
10242             if (!is_error(ret)) {
10243                 struct linux_dirent *de;
10244                 struct target_dirent *tde;
10245                 int len = ret;
10246                 int reclen, treclen;
10247                 int count1, tnamelen;
10248 
10249                 count1 = 0;
10250                 de = dirp;
10251                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10252                     return -TARGET_EFAULT;
10253                 tde = target_dirp;
10254                 while (len > 0) {
10255                     reclen = de->d_reclen;
10256                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10257                     assert(tnamelen >= 0);
10258                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10259                     assert(count1 + treclen <= count);
10260                     tde->d_reclen = tswap16(treclen);
10261                     tde->d_ino = tswapal(de->d_ino);
10262                     tde->d_off = tswapal(de->d_off);
10263                     memcpy(tde->d_name, de->d_name, tnamelen);
10264                     de = (struct linux_dirent *)((char *)de + reclen);
10265                     len -= reclen;
10266                     tde = (struct target_dirent *)((char *)tde + treclen);
10267                     count1 += treclen;
10268                 }
10269                 ret = count1;
10270                 unlock_user(target_dirp, arg2, ret);
10271             }
10272             g_free(dirp);
10273         }
10274 #else
10275         {
10276             struct linux_dirent *dirp;
10277             abi_long count = arg3;
10278 
10279             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10280                 return -TARGET_EFAULT;
10281             ret = get_errno(sys_getdents(arg1, dirp, count));
10282             if (!is_error(ret)) {
10283                 struct linux_dirent *de;
10284                 int len = ret;
10285                 int reclen;
10286                 de = dirp;
10287                 while (len > 0) {
10288                     reclen = de->d_reclen;
10289                     if (reclen > len)
10290                         break;
10291                     de->d_reclen = tswap16(reclen);
10292                     tswapls(&de->d_ino);
10293                     tswapls(&de->d_off);
10294                     de = (struct linux_dirent *)((char *)de + reclen);
10295                     len -= reclen;
10296                 }
10297             }
10298             unlock_user(dirp, arg2, ret);
10299         }
10300 #endif
10301 #else
10302         /* Implement getdents in terms of getdents64 */
10303         {
10304             struct linux_dirent64 *dirp;
10305             abi_long count = arg3;
10306 
10307             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10308             if (!dirp) {
10309                 return -TARGET_EFAULT;
10310             }
10311             ret = get_errno(sys_getdents64(arg1, dirp, count));
10312             if (!is_error(ret)) {
10313                 /* Convert the dirent64 structs to target dirent.  We do this
10314                  * in-place, since we can guarantee that a target_dirent is no
10315                  * larger than a dirent64; however this means we have to be
10316                  * careful to read everything before writing in the new format.
10317                  */
10318                 struct linux_dirent64 *de;
10319                 struct target_dirent *tde;
10320                 int len = ret;
10321                 int tlen = 0;
10322 
10323                 de = dirp;
10324                 tde = (struct target_dirent *)dirp;
10325                 while (len > 0) {
10326                     int namelen, treclen;
10327                     int reclen = de->d_reclen;
10328                     uint64_t ino = de->d_ino;
10329                     int64_t off = de->d_off;
10330                     uint8_t type = de->d_type;
10331 
10332                     namelen = strlen(de->d_name);
10333                     treclen = offsetof(struct target_dirent, d_name)
10334                         + namelen + 2;
10335                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10336 
10337                     memmove(tde->d_name, de->d_name, namelen + 1);
10338                     tde->d_ino = tswapal(ino);
10339                     tde->d_off = tswapal(off);
10340                     tde->d_reclen = tswap16(treclen);
10341                     /* The target_dirent type is in what was formerly a padding
10342                      * byte at the end of the structure:
10343                      */
10344                     *(((char *)tde) + treclen - 1) = type;
10345 
10346                     de = (struct linux_dirent64 *)((char *)de + reclen);
10347                     tde = (struct target_dirent *)((char *)tde + treclen);
10348                     len -= reclen;
10349                     tlen += treclen;
10350                 }
10351                 ret = tlen;
10352             }
10353             unlock_user(dirp, arg2, ret);
10354         }
10355 #endif
10356         return ret;
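    /*
     * Layout note for the getdents64-based emulation above: in the old
     * getdents ABI the d_type byte lives in the last byte of each record,
     * i.e. at offset d_reclen - 1, which is why the conversion reserves
     * namelen + 2 bytes past d_name (one for the terminating NUL, one for
     * d_type) and then stores the type through ((char *)tde) + treclen - 1.
     */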
10357 #endif /* TARGET_NR_getdents */
10358 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10359     case TARGET_NR_getdents64:
10360         {
10361             struct linux_dirent64 *dirp;
10362             abi_long count = arg3;
10363             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10364                 return -TARGET_EFAULT;
10365             ret = get_errno(sys_getdents64(arg1, dirp, count));
10366             if (!is_error(ret)) {
10367                 struct linux_dirent64 *de;
10368                 int len = ret;
10369                 int reclen;
10370                 de = dirp;
10371                 while (len > 0) {
10372                     reclen = de->d_reclen;
10373                     if (reclen > len)
10374                         break;
10375                     de->d_reclen = tswap16(reclen);
10376                     tswap64s((uint64_t *)&de->d_ino);
10377                     tswap64s((uint64_t *)&de->d_off);
10378                     de = (struct linux_dirent64 *)((char *)de + reclen);
10379                     len -= reclen;
10380                 }
10381             }
10382             unlock_user(dirp, arg2, ret);
10383         }
10384         return ret;
10385 #endif /* TARGET_NR_getdents64 */
10386 #if defined(TARGET_NR__newselect)
10387     case TARGET_NR__newselect:
10388         return do_select(arg1, arg2, arg3, arg4, arg5);
10389 #endif
10390 #ifdef TARGET_NR_poll
10391     case TARGET_NR_poll:
10392         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10393 #endif
10394 #ifdef TARGET_NR_ppoll
10395     case TARGET_NR_ppoll:
10396         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10397 #endif
10398 #ifdef TARGET_NR_ppoll_time64
10399     case TARGET_NR_ppoll_time64:
10400         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10401 #endif
10402     case TARGET_NR_flock:
10403         /* NOTE: the flock constant seems to be the same for every
10404            Linux platform */
10405         return get_errno(safe_flock(arg1, arg2));
10406     case TARGET_NR_readv:
10407         {
10408             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10409             if (vec != NULL) {
10410                 ret = get_errno(safe_readv(arg1, vec, arg3));
10411                 unlock_iovec(vec, arg2, arg3, 1);
10412             } else {
10413                 ret = -host_to_target_errno(errno);
10414             }
10415         }
10416         return ret;
10417     case TARGET_NR_writev:
10418         {
10419             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10420             if (vec != NULL) {
10421                 ret = get_errno(safe_writev(arg1, vec, arg3));
10422                 unlock_iovec(vec, arg2, arg3, 0);
10423             } else {
10424                 ret = -host_to_target_errno(errno);
10425             }
10426         }
10427         return ret;
10428 #if defined(TARGET_NR_preadv)
10429     case TARGET_NR_preadv:
10430         {
10431             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10432             if (vec != NULL) {
10433                 unsigned long low, high;
10434 
10435                 target_to_host_low_high(arg4, arg5, &low, &high);
10436                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10437                 unlock_iovec(vec, arg2, arg3, 1);
10438             } else {
10439                 ret = -host_to_target_errno(errno);
10440             }
10441         }
10442         return ret;
10443 #endif
10444 #if defined(TARGET_NR_pwritev)
10445     case TARGET_NR_pwritev:
10446         {
10447             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10448             if (vec != NULL) {
10449                 unsigned long low, high;
10450 
10451                 target_to_host_low_high(arg4, arg5, &low, &high);
10452                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10453                 unlock_iovec(vec, arg2, arg3, 0);
10454             } else {
10455                 ret = -host_to_target_errno(errno);
10456             }
10457         }
10458         return ret;
10459 #endif
10460     case TARGET_NR_getsid:
10461         return get_errno(getsid(arg1));
10462 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10463     case TARGET_NR_fdatasync:
10464         return get_errno(fdatasync(arg1));
10465 #endif
10466     case TARGET_NR_sched_getaffinity:
10467         {
10468             unsigned int mask_size;
10469             unsigned long *mask;
10470 
10471             /*
10472              * sched_getaffinity needs multiples of ulong, so we need to take
10473              * care of mismatches between target ulong and host ulong sizes.
10474              */
10475             if (arg2 & (sizeof(abi_ulong) - 1)) {
10476                 return -TARGET_EINVAL;
10477             }
10478             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10479 
10480             mask = alloca(mask_size);
10481             memset(mask, 0, mask_size);
10482             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10483 
10484             if (!is_error(ret)) {
10485                 if (ret > arg2) {
10486                     /* More data returned than the caller's buffer will fit.
10487                      * This only happens if sizeof(abi_long) < sizeof(long)
10488                      * and the caller passed us a buffer holding an odd number
10489                      * of abi_longs. If the host kernel is actually using the
10490                      * extra 4 bytes then fail EINVAL; otherwise we can just
10491                      * ignore them and only copy the interesting part.
10492                      */
10493                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10494                     if (numcpus > arg2 * 8) {
10495                         return -TARGET_EINVAL;
10496                     }
10497                     ret = arg2;
10498                 }
10499 
10500                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10501                     return -TARGET_EFAULT;
10502                 }
10503             }
10504         }
10505         return ret;
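    /*
     * Worked example for the size handling above: a 32-bit guest on a 64-bit
     * host that passes arg2 == 4 (one abi_ulong) gets mask_size rounded up
     * to 8 so the host syscall sees a whole unsigned long.  If the kernel
     * then reports 8 bytes of affinity data, the result is clamped back to
     * 4 -- unless the machine actually has more than arg2 * 8 == 32 CPUs
     * configured, in which case the guest's buffer really is too small and
     * -TARGET_EINVAL is returned.
     */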
10506     case TARGET_NR_sched_setaffinity:
10507         {
10508             unsigned int mask_size;
10509             unsigned long *mask;
10510 
10511             /*
10512              * sched_setaffinity needs multiples of ulong, so we need to take
10513              * care of mismatches between target ulong and host ulong sizes.
10514              */
10515             if (arg2 & (sizeof(abi_ulong) - 1)) {
10516                 return -TARGET_EINVAL;
10517             }
10518             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10519             mask = alloca(mask_size);
10520 
10521             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10522             if (ret) {
10523                 return ret;
10524             }
10525 
10526             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10527         }
10528     case TARGET_NR_getcpu:
10529         {
10530             unsigned cpu, node;
10531             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10532                                        arg2 ? &node : NULL,
10533                                        NULL));
10534             if (is_error(ret)) {
10535                 return ret;
10536             }
10537             if (arg1 && put_user_u32(cpu, arg1)) {
10538                 return -TARGET_EFAULT;
10539             }
10540             if (arg2 && put_user_u32(node, arg2)) {
10541                 return -TARGET_EFAULT;
10542             }
10543         }
10544         return ret;
10545     case TARGET_NR_sched_setparam:
10546         {
10547             struct sched_param *target_schp;
10548             struct sched_param schp;
10549 
10550             if (arg2 == 0) {
10551                 return -TARGET_EINVAL;
10552             }
10553             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10554                 return -TARGET_EFAULT;
10555             schp.sched_priority = tswap32(target_schp->sched_priority);
10556             unlock_user_struct(target_schp, arg2, 0);
10557             return get_errno(sched_setparam(arg1, &schp));
10558         }
10559     case TARGET_NR_sched_getparam:
10560         {
10561             struct sched_param *target_schp;
10562             struct sched_param schp;
10563 
10564             if (arg2 == 0) {
10565                 return -TARGET_EINVAL;
10566             }
10567             ret = get_errno(sched_getparam(arg1, &schp));
10568             if (!is_error(ret)) {
10569                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10570                     return -TARGET_EFAULT;
10571                 target_schp->sched_priority = tswap32(schp.sched_priority);
10572                 unlock_user_struct(target_schp, arg2, 1);
10573             }
10574         }
10575         return ret;
10576     case TARGET_NR_sched_setscheduler:
10577         {
10578             struct sched_param *target_schp;
10579             struct sched_param schp;
10580             if (arg3 == 0) {
10581                 return -TARGET_EINVAL;
10582             }
10583             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10584                 return -TARGET_EFAULT;
10585             schp.sched_priority = tswap32(target_schp->sched_priority);
10586             unlock_user_struct(target_schp, arg3, 0);
10587             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10588         }
10589     case TARGET_NR_sched_getscheduler:
10590         return get_errno(sched_getscheduler(arg1));
10591     case TARGET_NR_sched_yield:
10592         return get_errno(sched_yield());
10593     case TARGET_NR_sched_get_priority_max:
10594         return get_errno(sched_get_priority_max(arg1));
10595     case TARGET_NR_sched_get_priority_min:
10596         return get_errno(sched_get_priority_min(arg1));
10597 #ifdef TARGET_NR_sched_rr_get_interval
10598     case TARGET_NR_sched_rr_get_interval:
10599         {
10600             struct timespec ts;
10601             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10602             if (!is_error(ret)) {
10603                 ret = host_to_target_timespec(arg2, &ts);
10604             }
10605         }
10606         return ret;
10607 #endif
10608 #ifdef TARGET_NR_sched_rr_get_interval_time64
10609     case TARGET_NR_sched_rr_get_interval_time64:
10610         {
10611             struct timespec ts;
10612             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10613             if (!is_error(ret)) {
10614                 ret = host_to_target_timespec64(arg2, &ts);
10615             }
10616         }
10617         return ret;
10618 #endif
10619 #if defined(TARGET_NR_nanosleep)
10620     case TARGET_NR_nanosleep:
10621         {
10622             struct timespec req, rem;
10623             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10624             ret = get_errno(safe_nanosleep(&req, &rem));
10625             if (is_error(ret) && arg2) {
10626                 host_to_target_timespec(arg2, &rem);
10627             }
10628         }
10629         return ret;
10630 #endif
10631     case TARGET_NR_prctl:
10632         switch (arg1) {
10633         case PR_GET_PDEATHSIG:
10634         {
10635             int deathsig;
10636             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10637             if (!is_error(ret) && arg2
10638                 && put_user_s32(deathsig, arg2)) {
10639                 return -TARGET_EFAULT;
10640             }
10641             return ret;
10642         }
10643 #ifdef PR_GET_NAME
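              /* The task name buffer is TASK_COMM_LEN (16) bytes, including
               * the NUL terminator, for both getting and setting the name.
               */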
10644         case PR_GET_NAME:
10645         {
10646             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10647             if (!name) {
10648                 return -TARGET_EFAULT;
10649             }
10650             ret = get_errno(prctl(arg1, (unsigned long)name,
10651                                   arg3, arg4, arg5));
10652             unlock_user(name, arg2, 16);
10653             return ret;
10654         }
10655         case PR_SET_NAME:
10656         {
10657             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10658             if (!name) {
10659                 return -TARGET_EFAULT;
10660             }
10661             ret = get_errno(prctl(arg1, (unsigned long)name,
10662                                   arg3, arg4, arg5));
10663             unlock_user(name, arg2, 0);
10664             return ret;
10665         }
10666 #endif
10667 #ifdef TARGET_MIPS
10668         case TARGET_PR_GET_FP_MODE:
10669         {
10670             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10671             ret = 0;
10672             if (env->CP0_Status & (1 << CP0St_FR)) {
10673                 ret |= TARGET_PR_FP_MODE_FR;
10674             }
10675             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10676                 ret |= TARGET_PR_FP_MODE_FRE;
10677             }
10678             return ret;
10679         }
10680         case TARGET_PR_SET_FP_MODE:
10681         {
10682             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10683             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10684             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10685             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10686             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10687 
10688             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10689                                             TARGET_PR_FP_MODE_FRE;
10690 
10691             /* If nothing to change, return right away, successfully.  */
10692             if (old_fr == new_fr && old_fre == new_fre) {
10693                 return 0;
10694             }
10695             /* Check the value is valid */
10696             if (arg2 & ~known_bits) {
10697                 return -TARGET_EOPNOTSUPP;
10698             }
10699             /* Setting FRE without FR is not supported.  */
10700             if (new_fre && !new_fr) {
10701                 return -TARGET_EOPNOTSUPP;
10702             }
10703             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10704                 /* FR1 is not supported */
10705                 return -TARGET_EOPNOTSUPP;
10706             }
10707             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10708                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10709                 /* cannot set FR=0 */
10710                 return -TARGET_EOPNOTSUPP;
10711             }
10712             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10713                 /* Cannot set FRE=1 */
10714                 return -TARGET_EOPNOTSUPP;
10715             }
10716 
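                  /*
                   * Move the FP register contents so the guest-visible values
                   * survive the mode switch: with FR=0 the upper half of each
                   * even double register lives in the low word of the paired
                   * odd register, with FR=1 it lives in the even register
                   * itself.
                   */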
10717             int i;
10718             fpr_t *fpr = env->active_fpu.fpr;
10719             for (i = 0; i < 32 ; i += 2) {
10720                 if (!old_fr && new_fr) {
10721                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10722                 } else if (old_fr && !new_fr) {
10723                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10724                 }
10725             }
10726 
10727             if (new_fr) {
10728                 env->CP0_Status |= (1 << CP0St_FR);
10729                 env->hflags |= MIPS_HFLAG_F64;
10730             } else {
10731                 env->CP0_Status &= ~(1 << CP0St_FR);
10732                 env->hflags &= ~MIPS_HFLAG_F64;
10733             }
10734             if (new_fre) {
10735                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10736                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10737                     env->hflags |= MIPS_HFLAG_FRE;
10738                 }
10739             } else {
10740                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10741                 env->hflags &= ~MIPS_HFLAG_FRE;
10742             }
10743 
10744             return 0;
10745         }
10746 #endif /* MIPS */
10747 #ifdef TARGET_AARCH64
10748         case TARGET_PR_SVE_SET_VL:
10749             /*
10750              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10751              * PR_SVE_VL_INHERIT.  Note the kernel definition
10752              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10753              * even though the current architectural maximum is VQ=16.
10754              */
10755             ret = -TARGET_EINVAL;
10756             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10757                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10758                 CPUARMState *env = cpu_env;
10759                 ARMCPU *cpu = env_archcpu(env);
10760                 uint32_t vq, old_vq;
10761 
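                      /*
                       * arg2 is the requested vector length in bytes; convert
                       * it to quadwords (VQ = VL / 16), clamp to what this CPU
                       * supports, and store VQ - 1 in ZCR_EL1.LEN.  E.g.
                       * arg2 == 32 requests VQ = 2.
                       */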
10762                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10763                 vq = MAX(arg2 / 16, 1);
10764                 vq = MIN(vq, cpu->sve_max_vq);
10765 
10766                 if (vq < old_vq) {
10767                     aarch64_sve_narrow_vq(env, vq);
10768                 }
10769                 env->vfp.zcr_el[1] = vq - 1;
10770                 arm_rebuild_hflags(env);
10771                 ret = vq * 16;
10772             }
10773             return ret;
10774         case TARGET_PR_SVE_GET_VL:
10775             ret = -TARGET_EINVAL;
10776             {
10777                 ARMCPU *cpu = env_archcpu(cpu_env);
10778                 if (cpu_isar_feature(aa64_sve, cpu)) {
10779                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10780                 }
10781             }
10782             return ret;
10783         case TARGET_PR_PAC_RESET_KEYS:
10784             {
10785                 CPUARMState *env = cpu_env;
10786                 ARMCPU *cpu = env_archcpu(env);
10787 
10788                 if (arg3 || arg4 || arg5) {
10789                     return -TARGET_EINVAL;
10790                 }
10791                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10792                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10793                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10794                                TARGET_PR_PAC_APGAKEY);
10795                     int ret = 0;
10796                     Error *err = NULL;
10797 
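                          /*
                           * arg2 selects which pointer authentication keys to
                           * reset; 0 means all of them.  Each selected key is
                           * refilled with fresh random bytes.
                           */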
10798                     if (arg2 == 0) {
10799                         arg2 = all;
10800                     } else if (arg2 & ~all) {
10801                         return -TARGET_EINVAL;
10802                     }
10803                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10804                         ret |= qemu_guest_getrandom(&env->keys.apia,
10805                                                     sizeof(ARMPACKey), &err);
10806                     }
10807                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10808                         ret |= qemu_guest_getrandom(&env->keys.apib,
10809                                                     sizeof(ARMPACKey), &err);
10810                     }
10811                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10812                         ret |= qemu_guest_getrandom(&env->keys.apda,
10813                                                     sizeof(ARMPACKey), &err);
10814                     }
10815                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10816                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10817                                                     sizeof(ARMPACKey), &err);
10818                     }
10819                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10820                         ret |= qemu_guest_getrandom(&env->keys.apga,
10821                                                     sizeof(ARMPACKey), &err);
10822                     }
10823                     if (ret != 0) {
10824                         /*
10825                          * Some unknown failure in the crypto.  The best
10826                          * we can do is log it and fail the syscall.
10827                          * The real syscall cannot fail this way.
10828                          */
10829                         qemu_log_mask(LOG_UNIMP,
10830                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10831                                       error_get_pretty(err));
10832                         error_free(err);
10833                         return -TARGET_EIO;
10834                     }
10835                     return 0;
10836                 }
10837             }
10838             return -TARGET_EINVAL;
10839         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10840             {
10841                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10842                 CPUARMState *env = cpu_env;
10843                 ARMCPU *cpu = env_archcpu(env);
10844 
10845                 if (cpu_isar_feature(aa64_mte, cpu)) {
10846                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10847                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10848                 }
10849 
10850                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10851                     return -TARGET_EINVAL;
10852                 }
10853                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10854 
10855                 if (cpu_isar_feature(aa64_mte, cpu)) {
10856                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10857                     case TARGET_PR_MTE_TCF_NONE:
10858                     case TARGET_PR_MTE_TCF_SYNC:
10859                     case TARGET_PR_MTE_TCF_ASYNC:
10860                         break;
10861                     default:
10862                         return -TARGET_EINVAL;
10863                     }
10864 
10865                     /*
10866                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10867                      * Note that the syscall values are consistent with hw.
10868                      */
10869                     env->cp15.sctlr_el[1] =
10870                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10871                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10872 
10873                     /*
10874                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10875                      * Note that the syscall uses an include mask,
10876                      * and hardware uses an exclude mask -- invert.
10877                      */
10878                     env->cp15.gcr_el1 =
10879                         deposit64(env->cp15.gcr_el1, 0, 16,
10880                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10881                     arm_rebuild_hflags(env);
10882                 }
10883                 return 0;
10884             }
10885         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10886             {
10887                 abi_long ret = 0;
10888                 CPUARMState *env = cpu_env;
10889                 ARMCPU *cpu = env_archcpu(env);
10890 
10891                 if (arg2 || arg3 || arg4 || arg5) {
10892                     return -TARGET_EINVAL;
10893                 }
10894                 if (env->tagged_addr_enable) {
10895                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10896                 }
10897                 if (cpu_isar_feature(aa64_mte, cpu)) {
10898                     /* See above. */
10899                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10900                             << TARGET_PR_MTE_TCF_SHIFT);
10901                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10902                                     ~env->cp15.gcr_el1);
10903                 }
10904                 return ret;
10905             }
10906 #endif /* AARCH64 */
10907         case PR_GET_SECCOMP:
10908         case PR_SET_SECCOMP:
10909             /* Disable seccomp to prevent the target from disabling
10910              * syscalls we need. */
10911             return -TARGET_EINVAL;
10912         default:
10913             /* Most prctl options have no pointer arguments */
10914             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10915         }
10916         break;
10917 #ifdef TARGET_NR_arch_prctl
10918     case TARGET_NR_arch_prctl:
10919         return do_arch_prctl(cpu_env, arg1, arg2);
10920 #endif
10921 #ifdef TARGET_NR_pread64
10922     case TARGET_NR_pread64:
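              /*
               * ABIs that pass 64-bit values in aligned register pairs insert
               * a padding slot before the offset, so its halves arrive in
               * arg5/arg6 rather than arg4/arg5.
               */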
10923         if (regpairs_aligned(cpu_env, num)) {
10924             arg4 = arg5;
10925             arg5 = arg6;
10926         }
10927         if (arg2 == 0 && arg3 == 0) {
10928             /* Special-case NULL buffer and zero length, which should succeed */
10929             p = 0;
10930         } else {
10931             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10932             if (!p) {
10933                 return -TARGET_EFAULT;
10934             }
10935         }
10936         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10937         unlock_user(p, arg2, ret);
10938         return ret;
10939     case TARGET_NR_pwrite64:
10940         if (regpairs_aligned(cpu_env, num)) {
10941             arg4 = arg5;
10942             arg5 = arg6;
10943         }
10944         if (arg2 == 0 && arg3 == 0) {
10945             /* Special-case NULL buffer and zero length, which should succeed */
10946             p = 0;
10947         } else {
10948             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10949             if (!p) {
10950                 return -TARGET_EFAULT;
10951             }
10952         }
10953         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10954         unlock_user(p, arg2, 0);
10955         return ret;
10956 #endif
10957     case TARGET_NR_getcwd:
10958         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10959             return -TARGET_EFAULT;
10960         ret = get_errno(sys_getcwd1(p, arg2));
10961         unlock_user(p, arg1, ret);
10962         return ret;
10963     case TARGET_NR_capget:
10964     case TARGET_NR_capset:
10965     {
10966         struct target_user_cap_header *target_header;
10967         struct target_user_cap_data *target_data = NULL;
10968         struct __user_cap_header_struct header;
10969         struct __user_cap_data_struct data[2];
10970         struct __user_cap_data_struct *dataptr = NULL;
10971         int i, target_datalen;
10972         int data_items = 1;
10973 
10974         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10975             return -TARGET_EFAULT;
10976         }
10977         header.version = tswap32(target_header->version);
10978         header.pid = tswap32(target_header->pid);
10979 
10980         if (header.version != _LINUX_CAPABILITY_VERSION) {
10981             /* Version 2 and up takes pointer to two user_data structs */
10982             data_items = 2;
10983         }
10984 
10985         target_datalen = sizeof(*target_data) * data_items;
10986 
10987         if (arg2) {
10988             if (num == TARGET_NR_capget) {
10989                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10990             } else {
10991                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10992             }
10993             if (!target_data) {
10994                 unlock_user_struct(target_header, arg1, 0);
10995                 return -TARGET_EFAULT;
10996             }
10997 
10998             if (num == TARGET_NR_capset) {
10999                 for (i = 0; i < data_items; i++) {
11000                     data[i].effective = tswap32(target_data[i].effective);
11001                     data[i].permitted = tswap32(target_data[i].permitted);
11002                     data[i].inheritable = tswap32(target_data[i].inheritable);
11003                 }
11004             }
11005 
11006             dataptr = data;
11007         }
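              /*
               * A NULL data pointer makes capget a pure version probe: the
               * kernel just writes back its preferred version in the header.
               */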
11008 
11009         if (num == TARGET_NR_capget) {
11010             ret = get_errno(capget(&header, dataptr));
11011         } else {
11012             ret = get_errno(capset(&header, dataptr));
11013         }
11014 
11015         /* The kernel always updates version for both capget and capset */
11016         target_header->version = tswap32(header.version);
11017         unlock_user_struct(target_header, arg1, 1);
11018 
11019         if (arg2) {
11020             if (num == TARGET_NR_capget) {
11021                 for (i = 0; i < data_items; i++) {
11022                     target_data[i].effective = tswap32(data[i].effective);
11023                     target_data[i].permitted = tswap32(data[i].permitted);
11024                     target_data[i].inheritable = tswap32(data[i].inheritable);
11025                 }
11026                 unlock_user(target_data, arg2, target_datalen);
11027             } else {
11028                 unlock_user(target_data, arg2, 0);
11029             }
11030         }
11031         return ret;
11032     }
11033     case TARGET_NR_sigaltstack:
11034         return do_sigaltstack(arg1, arg2, cpu_env);
11035 
11036 #ifdef CONFIG_SENDFILE
11037 #ifdef TARGET_NR_sendfile
11038     case TARGET_NR_sendfile:
11039     {
11040         off_t *offp = NULL;
11041         off_t off;
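              /*
               * If the guest supplied an offset, read it in and let the host
               * sendfile() update the local copy, writing the new offset back
               * on success; otherwise pass NULL so the host file position is
               * used and updated.
               */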
11042         if (arg3) {
11043             ret = get_user_sal(off, arg3);
11044             if (is_error(ret)) {
11045                 return ret;
11046             }
11047             offp = &off;
11048         }
11049         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11050         if (!is_error(ret) && arg3) {
11051             abi_long ret2 = put_user_sal(off, arg3);
11052             if (is_error(ret2)) {
11053                 ret = ret2;
11054             }
11055         }
11056         return ret;
11057     }
11058 #endif
11059 #ifdef TARGET_NR_sendfile64
11060     case TARGET_NR_sendfile64:
11061     {
11062         off_t *offp = NULL;
11063         off_t off;
11064         if (arg3) {
11065             ret = get_user_s64(off, arg3);
11066             if (is_error(ret)) {
11067                 return ret;
11068             }
11069             offp = &off;
11070         }
11071         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11072         if (!is_error(ret) && arg3) {
11073             abi_long ret2 = put_user_s64(off, arg3);
11074             if (is_error(ret2)) {
11075                 ret = ret2;
11076             }
11077         }
11078         return ret;
11079     }
11080 #endif
11081 #endif
11082 #ifdef TARGET_NR_vfork
11083     case TARGET_NR_vfork:
11084         return get_errno(do_fork(cpu_env,
11085                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11086                          0, 0, 0, 0));
11087 #endif
11088 #ifdef TARGET_NR_ugetrlimit
11089     case TARGET_NR_ugetrlimit:
11090     {
11091         struct rlimit rlim;
11092         int resource = target_to_host_resource(arg1);
11093         ret = get_errno(getrlimit(resource, &rlim));
11094         if (!is_error(ret)) {
11095             struct target_rlimit *target_rlim;
11096             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11097                 return -TARGET_EFAULT;
11098             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11099             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11100             unlock_user_struct(target_rlim, arg2, 1);
11101         }
11102         return ret;
11103     }
11104 #endif
11105 #ifdef TARGET_NR_truncate64
11106     case TARGET_NR_truncate64:
11107         if (!(p = lock_user_string(arg1)))
11108             return -TARGET_EFAULT;
11109         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11110         unlock_user(p, arg1, 0);
11111         return ret;
11112 #endif
11113 #ifdef TARGET_NR_ftruncate64
11114     case TARGET_NR_ftruncate64:
11115         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11116 #endif
11117 #ifdef TARGET_NR_stat64
11118     case TARGET_NR_stat64:
11119         if (!(p = lock_user_string(arg1))) {
11120             return -TARGET_EFAULT;
11121         }
11122         ret = get_errno(stat(path(p), &st));
11123         unlock_user(p, arg1, 0);
11124         if (!is_error(ret))
11125             ret = host_to_target_stat64(cpu_env, arg2, &st);
11126         return ret;
11127 #endif
11128 #ifdef TARGET_NR_lstat64
11129     case TARGET_NR_lstat64:
11130         if (!(p = lock_user_string(arg1))) {
11131             return -TARGET_EFAULT;
11132         }
11133         ret = get_errno(lstat(path(p), &st));
11134         unlock_user(p, arg1, 0);
11135         if (!is_error(ret))
11136             ret = host_to_target_stat64(cpu_env, arg2, &st);
11137         return ret;
11138 #endif
11139 #ifdef TARGET_NR_fstat64
11140     case TARGET_NR_fstat64:
11141         ret = get_errno(fstat(arg1, &st));
11142         if (!is_error(ret))
11143             ret = host_to_target_stat64(cpu_env, arg2, &st);
11144         return ret;
11145 #endif
11146 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11147 #ifdef TARGET_NR_fstatat64
11148     case TARGET_NR_fstatat64:
11149 #endif
11150 #ifdef TARGET_NR_newfstatat
11151     case TARGET_NR_newfstatat:
11152 #endif
11153         if (!(p = lock_user_string(arg2))) {
11154             return -TARGET_EFAULT;
11155         }
11156         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11157         unlock_user(p, arg2, 0);
11158         if (!is_error(ret))
11159             ret = host_to_target_stat64(cpu_env, arg3, &st);
11160         return ret;
11161 #endif
11162 #if defined(TARGET_NR_statx)
11163     case TARGET_NR_statx:
11164         {
11165             struct target_statx *target_stx;
11166             int dirfd = arg1;
11167             int flags = arg3;
11168 
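                  /*
                   * Prefer the host statx() syscall when it is available; if
                   * the host kernel lacks it, fall back to fstatat() below and
                   * synthesize a target_statx from the struct stat result.
                   */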
11169             p = lock_user_string(arg2);
11170             if (p == NULL) {
11171                 return -TARGET_EFAULT;
11172             }
11173 #if defined(__NR_statx)
11174             {
11175                 /*
11176                  * It is assumed that struct statx is architecture independent.
11177                  */
11178                 struct target_statx host_stx;
11179                 int mask = arg4;
11180 
11181                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11182                 if (!is_error(ret)) {
11183                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11184                         unlock_user(p, arg2, 0);
11185                         return -TARGET_EFAULT;
11186                     }
11187                 }
11188 
11189                 if (ret != -TARGET_ENOSYS) {
11190                     unlock_user(p, arg2, 0);
11191                     return ret;
11192                 }
11193             }
11194 #endif
11195             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11196             unlock_user(p, arg2, 0);
11197 
11198             if (!is_error(ret)) {
11199                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11200                     return -TARGET_EFAULT;
11201                 }
11202                 memset(target_stx, 0, sizeof(*target_stx));
11203                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11204                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11205                 __put_user(st.st_ino, &target_stx->stx_ino);
11206                 __put_user(st.st_mode, &target_stx->stx_mode);
11207                 __put_user(st.st_uid, &target_stx->stx_uid);
11208                 __put_user(st.st_gid, &target_stx->stx_gid);
11209                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11210                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11211                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11212                 __put_user(st.st_size, &target_stx->stx_size);
11213                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11214                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11215                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11216                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11217                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11218                 unlock_user_struct(target_stx, arg5, 1);
11219             }
11220         }
11221         return ret;
11222 #endif
11223 #ifdef TARGET_NR_lchown
11224     case TARGET_NR_lchown:
11225         if (!(p = lock_user_string(arg1)))
11226             return -TARGET_EFAULT;
11227         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11228         unlock_user(p, arg1, 0);
11229         return ret;
11230 #endif
11231 #ifdef TARGET_NR_getuid
11232     case TARGET_NR_getuid:
11233         return get_errno(high2lowuid(getuid()));
11234 #endif
11235 #ifdef TARGET_NR_getgid
11236     case TARGET_NR_getgid:
11237         return get_errno(high2lowgid(getgid()));
11238 #endif
11239 #ifdef TARGET_NR_geteuid
11240     case TARGET_NR_geteuid:
11241         return get_errno(high2lowuid(geteuid()));
11242 #endif
11243 #ifdef TARGET_NR_getegid
11244     case TARGET_NR_getegid:
11245         return get_errno(high2lowgid(getegid()));
11246 #endif
11247     case TARGET_NR_setreuid:
11248         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11249     case TARGET_NR_setregid:
11250         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11251     case TARGET_NR_getgroups:
11252         {
11253             int gidsetsize = arg1;
11254             target_id *target_grouplist;
11255             gid_t *grouplist;
11256             int i;
11257 
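                  /*
                   * A zero gidsetsize only queries the number of supplementary
                   * groups, so return the host result without touching the
                   * guest buffer.
                   */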
11258             grouplist = alloca(gidsetsize * sizeof(gid_t));
11259             ret = get_errno(getgroups(gidsetsize, grouplist));
11260             if (gidsetsize == 0)
11261                 return ret;
11262             if (!is_error(ret)) {
11263                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11264                 if (!target_grouplist)
11265                     return -TARGET_EFAULT;
11266                 for (i = 0; i < ret; i++)
11267                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11268                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11269             }
11270         }
11271         return ret;
11272     case TARGET_NR_setgroups:
11273         {
11274             int gidsetsize = arg1;
11275             target_id *target_grouplist;
11276             gid_t *grouplist = NULL;
11277             int i;
11278             if (gidsetsize) {
11279                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11280                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11281                 if (!target_grouplist) {
11282                     return -TARGET_EFAULT;
11283                 }
11284                 for (i = 0; i < gidsetsize; i++) {
11285                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11286                 }
11287                 unlock_user(target_grouplist, arg2, 0);
11288             }
11289             return get_errno(setgroups(gidsetsize, grouplist));
11290         }
11291     case TARGET_NR_fchown:
11292         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11293 #if defined(TARGET_NR_fchownat)
11294     case TARGET_NR_fchownat:
11295         if (!(p = lock_user_string(arg2)))
11296             return -TARGET_EFAULT;
11297         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11298                                  low2highgid(arg4), arg5));
11299         unlock_user(p, arg2, 0);
11300         return ret;
11301 #endif
11302 #ifdef TARGET_NR_setresuid
11303     case TARGET_NR_setresuid:
11304         return get_errno(sys_setresuid(low2highuid(arg1),
11305                                        low2highuid(arg2),
11306                                        low2highuid(arg3)));
11307 #endif
11308 #ifdef TARGET_NR_getresuid
11309     case TARGET_NR_getresuid:
11310         {
11311             uid_t ruid, euid, suid;
11312             ret = get_errno(getresuid(&ruid, &euid, &suid));
11313             if (!is_error(ret)) {
11314                 if (put_user_id(high2lowuid(ruid), arg1)
11315                     || put_user_id(high2lowuid(euid), arg2)
11316                     || put_user_id(high2lowuid(suid), arg3))
11317                     return -TARGET_EFAULT;
11318             }
11319         }
11320         return ret;
11321 #endif
11322 #ifdef TARGET_NR_getresgid
11323     case TARGET_NR_setresgid:
11324         return get_errno(sys_setresgid(low2highgid(arg1),
11325                                        low2highgid(arg2),
11326                                        low2highgid(arg3)));
11327 #endif
11328 #ifdef TARGET_NR_getresgid
11329     case TARGET_NR_getresgid:
11330         {
11331             gid_t rgid, egid, sgid;
11332             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11333             if (!is_error(ret)) {
11334                 if (put_user_id(high2lowgid(rgid), arg1)
11335                     || put_user_id(high2lowgid(egid), arg2)
11336                     || put_user_id(high2lowgid(sgid), arg3))
11337                     return -TARGET_EFAULT;
11338             }
11339         }
11340         return ret;
11341 #endif
11342 #ifdef TARGET_NR_chown
11343     case TARGET_NR_chown:
11344         if (!(p = lock_user_string(arg1)))
11345             return -TARGET_EFAULT;
11346         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11347         unlock_user(p, arg1, 0);
11348         return ret;
11349 #endif
11350     case TARGET_NR_setuid:
11351         return get_errno(sys_setuid(low2highuid(arg1)));
11352     case TARGET_NR_setgid:
11353         return get_errno(sys_setgid(low2highgid(arg1)));
11354     case TARGET_NR_setfsuid:
11355         return get_errno(setfsuid(arg1));
11356     case TARGET_NR_setfsgid:
11357         return get_errno(setfsgid(arg1));
11358 
11359 #ifdef TARGET_NR_lchown32
11360     case TARGET_NR_lchown32:
11361         if (!(p = lock_user_string(arg1)))
11362             return -TARGET_EFAULT;
11363         ret = get_errno(lchown(p, arg2, arg3));
11364         unlock_user(p, arg1, 0);
11365         return ret;
11366 #endif
11367 #ifdef TARGET_NR_getuid32
11368     case TARGET_NR_getuid32:
11369         return get_errno(getuid());
11370 #endif
11371 
11372 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11373    /* Alpha specific */
11374     case TARGET_NR_getxuid:
11375         {
11376             uid_t euid;
11377             euid = geteuid();
11378             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11379         }
11380         return get_errno(getuid());
11381 #endif
11382 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11383    /* Alpha specific */
11384     case TARGET_NR_getxgid:
11385         {
11386             gid_t egid;
11387             egid = getegid();
11388             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11389         }
11390         return get_errno(getgid());
11391 #endif
11392 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11393     /* Alpha specific */
11394     case TARGET_NR_osf_getsysinfo:
11395         ret = -TARGET_EOPNOTSUPP;
11396         switch (arg1) {
11397           case TARGET_GSI_IEEE_FP_CONTROL:
11398             {
11399                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11400                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11401 
11402                 swcr &= ~SWCR_STATUS_MASK;
11403                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11404 
11405                 if (put_user_u64 (swcr, arg2))
11406                         return -TARGET_EFAULT;
11407                 ret = 0;
11408             }
11409             break;
11410 
11411           /* case GSI_IEEE_STATE_AT_SIGNAL:
11412              -- Not implemented in linux kernel.
11413              case GSI_UACPROC:
11414              -- Retrieves current unaligned access state; not much used.
11415              case GSI_PROC_TYPE:
11416              -- Retrieves implver information; surely not used.
11417              case GSI_GET_HWRPB:
11418              -- Grabs a copy of the HWRPB; surely not used.
11419           */
11420         }
11421         return ret;
11422 #endif
11423 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11424     /* Alpha specific */
11425     case TARGET_NR_osf_setsysinfo:
11426         ret = -TARGET_EOPNOTSUPP;
11427         switch (arg1) {
11428           case TARGET_SSI_IEEE_FP_CONTROL:
11429             {
11430                 uint64_t swcr, fpcr;
11431 
11432                 if (get_user_u64 (swcr, arg2)) {
11433                     return -TARGET_EFAULT;
11434                 }
11435 
11436                 /*
11437                  * The kernel calls swcr_update_status to update the
11438                  * status bits from the fpcr at every point that it
11439                  * could be queried.  Therefore, we store the status
11440                  * bits only in FPCR.
11441                  */
11442                 ((CPUAlphaState *)cpu_env)->swcr
11443                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11444 
11445                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11446                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11447                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11448                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11449                 ret = 0;
11450             }
11451             break;
11452 
11453           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11454             {
11455                 uint64_t exc, fpcr, fex;
11456 
11457                 if (get_user_u64(exc, arg2)) {
11458                     return -TARGET_EFAULT;
11459                 }
11460                 exc &= SWCR_STATUS_MASK;
11461                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11462 
11463                 /* Old exceptions are not signaled.  */
11464                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11465                 fex = exc & ~fex;
11466                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11467                 fex &= ((CPUArchState *)cpu_env)->swcr;
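                      /*
                       * fex now holds only the newly raised status bits whose
                       * traps the guest has enabled; these are delivered as a
                       * SIGFPE below.
                       */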
11468 
11469                 /* Update the hardware fpcr.  */
11470                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11471                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11472 
11473                 if (fex) {
11474                     int si_code = TARGET_FPE_FLTUNK;
11475                     target_siginfo_t info;
11476 
11477                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11478                         si_code = TARGET_FPE_FLTUND;
11479                     }
11480                     if (fex & SWCR_TRAP_ENABLE_INE) {
11481                         si_code = TARGET_FPE_FLTRES;
11482                     }
11483                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11484                         si_code = TARGET_FPE_FLTUND;
11485                     }
11486                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11487                         si_code = TARGET_FPE_FLTOVF;
11488                     }
11489                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11490                         si_code = TARGET_FPE_FLTDIV;
11491                     }
11492                     if (fex & SWCR_TRAP_ENABLE_INV) {
11493                         si_code = TARGET_FPE_FLTINV;
11494                     }
11495 
11496                     info.si_signo = SIGFPE;
11497                     info.si_errno = 0;
11498                     info.si_code = si_code;
11499                     info._sifields._sigfault._addr
11500                         = ((CPUArchState *)cpu_env)->pc;
11501                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11502                                  QEMU_SI_FAULT, &info);
11503                 }
11504                 ret = 0;
11505             }
11506             break;
11507 
11508           /* case SSI_NVPAIRS:
11509              -- Used with SSIN_UACPROC to enable unaligned accesses.
11510              case SSI_IEEE_STATE_AT_SIGNAL:
11511              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11512              -- Not implemented in linux kernel
11513           */
11514         }
11515         return ret;
11516 #endif
11517 #ifdef TARGET_NR_osf_sigprocmask
11518     /* Alpha specific.  */
11519     case TARGET_NR_osf_sigprocmask:
11520         {
11521             abi_ulong mask;
11522             int how;
11523             sigset_t set, oldset;
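                  /*
                   * This OSF variant takes the mask by value and returns the
                   * old mask as the syscall result instead of through a
                   * pointer.
                   */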
11524 
11525             switch (arg1) {
11526             case TARGET_SIG_BLOCK:
11527                 how = SIG_BLOCK;
11528                 break;
11529             case TARGET_SIG_UNBLOCK:
11530                 how = SIG_UNBLOCK;
11531                 break;
11532             case TARGET_SIG_SETMASK:
11533                 how = SIG_SETMASK;
11534                 break;
11535             default:
11536                 return -TARGET_EINVAL;
11537             }
11538             mask = arg2;
11539             target_to_host_old_sigset(&set, &mask);
11540             ret = do_sigprocmask(how, &set, &oldset);
11541             if (!ret) {
11542                 host_to_target_old_sigset(&mask, &oldset);
11543                 ret = mask;
11544             }
11545         }
11546         return ret;
11547 #endif
11548 
11549 #ifdef TARGET_NR_getgid32
11550     case TARGET_NR_getgid32:
11551         return get_errno(getgid());
11552 #endif
11553 #ifdef TARGET_NR_geteuid32
11554     case TARGET_NR_geteuid32:
11555         return get_errno(geteuid());
11556 #endif
11557 #ifdef TARGET_NR_getegid32
11558     case TARGET_NR_getegid32:
11559         return get_errno(getegid());
11560 #endif
11561 #ifdef TARGET_NR_setreuid32
11562     case TARGET_NR_setreuid32:
11563         return get_errno(setreuid(arg1, arg2));
11564 #endif
11565 #ifdef TARGET_NR_setregid32
11566     case TARGET_NR_setregid32:
11567         return get_errno(setregid(arg1, arg2));
11568 #endif
11569 #ifdef TARGET_NR_getgroups32
11570     case TARGET_NR_getgroups32:
11571         {
11572             int gidsetsize = arg1;
11573             uint32_t *target_grouplist;
11574             gid_t *grouplist;
11575             int i;
11576 
11577             grouplist = alloca(gidsetsize * sizeof(gid_t));
11578             ret = get_errno(getgroups(gidsetsize, grouplist));
11579             if (gidsetsize == 0)
11580                 return ret;
11581             if (!is_error(ret)) {
11582                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11583                 if (!target_grouplist) {
11584                     return -TARGET_EFAULT;
11585                 }
11586                 for (i = 0; i < ret; i++)
11587                     target_grouplist[i] = tswap32(grouplist[i]);
11588                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11589             }
11590         }
11591         return ret;
11592 #endif
11593 #ifdef TARGET_NR_setgroups32
11594     case TARGET_NR_setgroups32:
11595         {
11596             int gidsetsize = arg1;
11597             uint32_t *target_grouplist;
11598             gid_t *grouplist;
11599             int i;
11600 
11601             grouplist = alloca(gidsetsize * sizeof(gid_t));
11602             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11603             if (!target_grouplist) {
11604                 return -TARGET_EFAULT;
11605             }
11606             for (i = 0; i < gidsetsize; i++)
11607                 grouplist[i] = tswap32(target_grouplist[i]);
11608             unlock_user(target_grouplist, arg2, 0);
11609             return get_errno(setgroups(gidsetsize, grouplist));
11610         }
11611 #endif
11612 #ifdef TARGET_NR_fchown32
11613     case TARGET_NR_fchown32:
11614         return get_errno(fchown(arg1, arg2, arg3));
11615 #endif
11616 #ifdef TARGET_NR_setresuid32
11617     case TARGET_NR_setresuid32:
11618         return get_errno(sys_setresuid(arg1, arg2, arg3));
11619 #endif
11620 #ifdef TARGET_NR_getresuid32
11621     case TARGET_NR_getresuid32:
11622         {
11623             uid_t ruid, euid, suid;
11624             ret = get_errno(getresuid(&ruid, &euid, &suid));
11625             if (!is_error(ret)) {
11626                 if (put_user_u32(ruid, arg1)
11627                     || put_user_u32(euid, arg2)
11628                     || put_user_u32(suid, arg3))
11629                     return -TARGET_EFAULT;
11630             }
11631         }
11632         return ret;
11633 #endif
11634 #ifdef TARGET_NR_setresgid32
11635     case TARGET_NR_setresgid32:
11636         return get_errno(sys_setresgid(arg1, arg2, arg3));
11637 #endif
11638 #ifdef TARGET_NR_getresgid32
11639     case TARGET_NR_getresgid32:
11640         {
11641             gid_t rgid, egid, sgid;
11642             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11643             if (!is_error(ret)) {
11644                 if (put_user_u32(rgid, arg1)
11645                     || put_user_u32(egid, arg2)
11646                     || put_user_u32(sgid, arg3))
11647                     return -TARGET_EFAULT;
11648             }
11649         }
11650         return ret;
11651 #endif
11652 #ifdef TARGET_NR_chown32
11653     case TARGET_NR_chown32:
11654         if (!(p = lock_user_string(arg1)))
11655             return -TARGET_EFAULT;
11656         ret = get_errno(chown(p, arg2, arg3));
11657         unlock_user(p, arg1, 0);
11658         return ret;
11659 #endif
11660 #ifdef TARGET_NR_setuid32
11661     case TARGET_NR_setuid32:
11662         return get_errno(sys_setuid(arg1));
11663 #endif
11664 #ifdef TARGET_NR_setgid32
11665     case TARGET_NR_setgid32:
11666         return get_errno(sys_setgid(arg1));
11667 #endif
11668 #ifdef TARGET_NR_setfsuid32
11669     case TARGET_NR_setfsuid32:
11670         return get_errno(setfsuid(arg1));
11671 #endif
11672 #ifdef TARGET_NR_setfsgid32
11673     case TARGET_NR_setfsgid32:
11674         return get_errno(setfsgid(arg1));
11675 #endif
11676 #ifdef TARGET_NR_mincore
11677     case TARGET_NR_mincore:
11678         {
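                  /*
                   * An unmapped region argument is reported as ENOMEM, as the
                   * kernel does for mincore(); only a bad status vector
                   * pointer yields EFAULT.
                   */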
11679             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11680             if (!a) {
11681                 return -TARGET_ENOMEM;
11682             }
11683             p = lock_user_string(arg3);
11684             if (!p) {
11685                 ret = -TARGET_EFAULT;
11686             } else {
11687                 ret = get_errno(mincore(a, arg2, p));
11688                 unlock_user(p, arg3, ret);
11689             }
11690             unlock_user(a, arg1, 0);
11691         }
11692         return ret;
11693 #endif
11694 #ifdef TARGET_NR_arm_fadvise64_64
11695     case TARGET_NR_arm_fadvise64_64:
11696         /* arm_fadvise64_64 looks like fadvise64_64 but
11697          * with different argument order: fd, advice, offset, len
11698          * rather than the usual fd, offset, len, advice.
11699          * Note that offset and len are both 64-bit so appear as
11700          * pairs of 32-bit registers.
11701          */
11702         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11703                             target_offset64(arg5, arg6), arg2);
11704         return -host_to_target_errno(ret);
11705 #endif
11706 
11707 #if TARGET_ABI_BITS == 32
11708 
11709 #ifdef TARGET_NR_fadvise64_64
11710     case TARGET_NR_fadvise64_64:
11711 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11712         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11713         ret = arg2;
11714         arg2 = arg3;
11715         arg3 = arg4;
11716         arg4 = arg5;
11717         arg5 = arg6;
11718         arg6 = ret;
11719 #else
11720         /* 6 args: fd, offset (high, low), len (high, low), advice */
11721         if (regpairs_aligned(cpu_env, num)) {
11722             /* offset is in (3,4), len in (5,6) and advice in 7 */
11723             arg2 = arg3;
11724             arg3 = arg4;
11725             arg4 = arg5;
11726             arg5 = arg6;
11727             arg6 = arg7;
11728         }
11729 #endif
11730         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11731                             target_offset64(arg4, arg5), arg6);
11732         return -host_to_target_errno(ret);
11733 #endif
11734 
11735 #ifdef TARGET_NR_fadvise64
11736     case TARGET_NR_fadvise64:
11737         /* 5 args: fd, offset (high, low), len, advice */
11738         if (regpairs_aligned(cpu_env, num)) {
11739             /* offset is in (3,4), len in 5 and advice in 6 */
11740             arg2 = arg3;
11741             arg3 = arg4;
11742             arg4 = arg5;
11743             arg5 = arg6;
11744         }
11745         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11746         return -host_to_target_errno(ret);
11747 #endif
11748 
11749 #else /* not a 32-bit ABI */
11750 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11751 #ifdef TARGET_NR_fadvise64_64
11752     case TARGET_NR_fadvise64_64:
11753 #endif
11754 #ifdef TARGET_NR_fadvise64
11755     case TARGET_NR_fadvise64:
11756 #endif
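              /*
               * s390 defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than
               * the generic 4/5, so remap the guest values to the host macros
               * and turn the generic 4/5 into deliberately invalid values.
               */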
11757 #ifdef TARGET_S390X
11758         switch (arg4) {
11759         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11760         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11761         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11762         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11763         default: break;
11764         }
11765 #endif
11766         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11767 #endif
11768 #endif /* end of 64-bit ABI fadvise handling */
11769 
11770 #ifdef TARGET_NR_madvise
11771     case TARGET_NR_madvise:
11772         /* A straight passthrough may not be safe because qemu sometimes
11773            turns private file-backed mappings into anonymous mappings.
11774            This will break MADV_DONTNEED.
11775            This is a hint, so ignoring and returning success is ok.  */
11776         return 0;
11777 #endif
11778 #ifdef TARGET_NR_fcntl64
11779     case TARGET_NR_fcntl64:
11780     {
11781         int cmd;
11782         struct flock64 fl;
11783         from_flock64_fn *copyfrom = copy_from_user_flock64;
11784         to_flock64_fn *copyto = copy_to_user_flock64;
11785 
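              /*
               * The old ARM ABI only aligns 64-bit members to 4 bytes, so the
               * guest's struct flock64 is laid out differently from EABI and
               * needs its own copy helpers.
               */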
11786 #ifdef TARGET_ARM
11787         if (!((CPUARMState *)cpu_env)->eabi) {
11788             copyfrom = copy_from_user_oabi_flock64;
11789             copyto = copy_to_user_oabi_flock64;
11790         }
11791 #endif
11792 
11793         cmd = target_to_host_fcntl_cmd(arg2);
11794         if (cmd == -TARGET_EINVAL) {
11795             return cmd;
11796         }
11797 
11798         switch (arg2) {
11799         case TARGET_F_GETLK64:
11800             ret = copyfrom(&fl, arg3);
11801             if (ret) {
11802                 break;
11803             }
11804             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11805             if (ret == 0) {
11806                 ret = copyto(arg3, &fl);
11807             }
11808             break;
11809 
11810         case TARGET_F_SETLK64:
11811         case TARGET_F_SETLKW64:
11812             ret = copyfrom(&fl, arg3);
11813             if (ret) {
11814                 break;
11815             }
11816             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11817             break;
11818         default:
11819             ret = do_fcntl(arg1, arg2, arg3);
11820             break;
11821         }
11822         return ret;
11823     }
11824 #endif
11825 #ifdef TARGET_NR_cacheflush
11826     case TARGET_NR_cacheflush:
11827         /* self-modifying code is handled automatically, so nothing needed */
11828         return 0;
11829 #endif
11830 #ifdef TARGET_NR_getpagesize
11831     case TARGET_NR_getpagesize:
11832         return TARGET_PAGE_SIZE;
11833 #endif
11834     case TARGET_NR_gettid:
11835         return get_errno(sys_gettid());
11836 #ifdef TARGET_NR_readahead
11837     case TARGET_NR_readahead:
11838 #if TARGET_ABI_BITS == 32
11839         if (regpairs_aligned(cpu_env, num)) {
11840             arg2 = arg3;
11841             arg3 = arg4;
11842             arg4 = arg5;
11843         }
11844         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11845 #else
11846         ret = get_errno(readahead(arg1, arg2, arg3));
11847 #endif
11848         return ret;
11849 #endif
11850 #ifdef CONFIG_ATTR
11851 #ifdef TARGET_NR_setxattr
11852     case TARGET_NR_listxattr:
11853     case TARGET_NR_llistxattr:
11854     {
11855         void *p, *b = 0;
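              /*
               * A NULL guest buffer is the size-probing form of listxattr:
               * pass NULL through so the host call just reports the length
               * that would be needed.
               */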
11856         if (arg2) {
11857             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11858             if (!b) {
11859                 return -TARGET_EFAULT;
11860             }
11861         }
11862         p = lock_user_string(arg1);
11863         if (p) {
11864             if (num == TARGET_NR_listxattr) {
11865                 ret = get_errno(listxattr(p, b, arg3));
11866             } else {
11867                 ret = get_errno(llistxattr(p, b, arg3));
11868             }
11869         } else {
11870             ret = -TARGET_EFAULT;
11871         }
11872         unlock_user(p, arg1, 0);
11873         unlock_user(b, arg2, arg3);
11874         return ret;
11875     }
11876     case TARGET_NR_flistxattr:
11877     {
11878         void *b = 0;
11879         if (arg2) {
11880             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11881             if (!b) {
11882                 return -TARGET_EFAULT;
11883             }
11884         }
11885         ret = get_errno(flistxattr(arg1, b, arg3));
11886         unlock_user(b, arg2, arg3);
11887         return ret;
11888     }
11889     case TARGET_NR_setxattr:
11890     case TARGET_NR_lsetxattr:
11891         {
11892             void *p, *n, *v = 0;
11893             if (arg3) {
11894                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11895                 if (!v) {
11896                     return -TARGET_EFAULT;
11897                 }
11898             }
11899             p = lock_user_string(arg1);
11900             n = lock_user_string(arg2);
11901             if (p && n) {
11902                 if (num == TARGET_NR_setxattr) {
11903                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11904                 } else {
11905                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11906                 }
11907             } else {
11908                 ret = -TARGET_EFAULT;
11909             }
11910             unlock_user(p, arg1, 0);
11911             unlock_user(n, arg2, 0);
11912             unlock_user(v, arg3, 0);
11913         }
11914         return ret;
11915     case TARGET_NR_fsetxattr:
11916         {
11917             void *n, *v = 0;
11918             if (arg3) {
11919                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11920                 if (!v) {
11921                     return -TARGET_EFAULT;
11922                 }
11923             }
11924             n = lock_user_string(arg2);
11925             if (n) {
11926                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11927             } else {
11928                 ret = -TARGET_EFAULT;
11929             }
11930             unlock_user(n, arg2, 0);
11931             unlock_user(v, arg3, 0);
11932         }
11933         return ret;
11934     case TARGET_NR_getxattr:
11935     case TARGET_NR_lgetxattr:
11936         {
11937             void *p, *n, *v = 0;
11938             if (arg3) {
11939                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11940                 if (!v) {
11941                     return -TARGET_EFAULT;
11942                 }
11943             }
11944             p = lock_user_string(arg1);
11945             n = lock_user_string(arg2);
11946             if (p && n) {
11947                 if (num == TARGET_NR_getxattr) {
11948                     ret = get_errno(getxattr(p, n, v, arg4));
11949                 } else {
11950                     ret = get_errno(lgetxattr(p, n, v, arg4));
11951                 }
11952             } else {
11953                 ret = -TARGET_EFAULT;
11954             }
11955             unlock_user(p, arg1, 0);
11956             unlock_user(n, arg2, 0);
11957             unlock_user(v, arg3, arg4);
11958         }
11959         return ret;
11960     case TARGET_NR_fgetxattr:
11961         {
11962             void *n, *v = 0;
11963             if (arg3) {
11964                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11965                 if (!v) {
11966                     return -TARGET_EFAULT;
11967                 }
11968             }
11969             n = lock_user_string(arg2);
11970             if (n) {
11971                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11972             } else {
11973                 ret = -TARGET_EFAULT;
11974             }
11975             unlock_user(n, arg2, 0);
11976             unlock_user(v, arg3, arg4);
11977         }
11978         return ret;
11979     case TARGET_NR_removexattr:
11980     case TARGET_NR_lremovexattr:
11981         {
11982             void *p, *n;
11983             p = lock_user_string(arg1);
11984             n = lock_user_string(arg2);
11985             if (p && n) {
11986                 if (num == TARGET_NR_removexattr) {
11987                     ret = get_errno(removexattr(p, n));
11988                 } else {
11989                     ret = get_errno(lremovexattr(p, n));
11990                 }
11991             } else {
11992                 ret = -TARGET_EFAULT;
11993             }
11994             unlock_user(p, arg1, 0);
11995             unlock_user(n, arg2, 0);
11996         }
11997         return ret;
11998     case TARGET_NR_fremovexattr:
11999         {
12000             void *n;
12001             n = lock_user_string(arg2);
12002             if (n) {
12003                 ret = get_errno(fremovexattr(arg1, n));
12004             } else {
12005                 ret = -TARGET_EFAULT;
12006             }
12007             unlock_user(n, arg2, 0);
12008         }
12009         return ret;
12010 #endif
12011 #endif /* CONFIG_ATTR */
12012 #ifdef TARGET_NR_set_thread_area
12013     case TARGET_NR_set_thread_area:
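              /*
               * None of these need a host syscall: the thread area pointer is
               * just recorded on the guest side (CPU state, TaskState for
               * m68k, or the guest GDT via do_set_thread_area() for i386).
               */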
12014 #if defined(TARGET_MIPS)
12015       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12016       return 0;
12017 #elif defined(TARGET_CRIS)
12018       if (arg1 & 0xff)
12019           ret = -TARGET_EINVAL;
12020       else {
12021           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12022           ret = 0;
12023       }
12024       return ret;
12025 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12026       return do_set_thread_area(cpu_env, arg1);
12027 #elif defined(TARGET_M68K)
12028       {
12029           TaskState *ts = cpu->opaque;
12030           ts->tp_value = arg1;
12031           return 0;
12032       }
12033 #else
12034       return -TARGET_ENOSYS;
12035 #endif
12036 #endif
12037 #ifdef TARGET_NR_get_thread_area
12038     case TARGET_NR_get_thread_area:
12039 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12040         return do_get_thread_area(cpu_env, arg1);
12041 #elif defined(TARGET_M68K)
12042         {
12043             TaskState *ts = cpu->opaque;
12044             return ts->tp_value;
12045         }
12046 #else
12047         return -TARGET_ENOSYS;
12048 #endif
12049 #endif
12050 #ifdef TARGET_NR_getdomainname
12051     case TARGET_NR_getdomainname:
12052         return -TARGET_ENOSYS;
12053 #endif
12054 
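    /*
     * The clock_* syscalls only need to convert the timespec between the
     * target and host layouts.  The *_time64 variants use the 64-bit
     * struct target__kernel_timespec so that 32-bit guests can express
     * times beyond 2038.
     */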
12055 #ifdef TARGET_NR_clock_settime
12056     case TARGET_NR_clock_settime:
12057     {
12058         struct timespec ts;
12059 
12060         ret = target_to_host_timespec(&ts, arg2);
12061         if (!is_error(ret)) {
12062             ret = get_errno(clock_settime(arg1, &ts));
12063         }
12064         return ret;
12065     }
12066 #endif
12067 #ifdef TARGET_NR_clock_settime64
12068     case TARGET_NR_clock_settime64:
12069     {
12070         struct timespec ts;
12071 
12072         ret = target_to_host_timespec64(&ts, arg2);
12073         if (!is_error(ret)) {
12074             ret = get_errno(clock_settime(arg1, &ts));
12075         }
12076         return ret;
12077     }
12078 #endif
12079 #ifdef TARGET_NR_clock_gettime
12080     case TARGET_NR_clock_gettime:
12081     {
12082         struct timespec ts;
12083         ret = get_errno(clock_gettime(arg1, &ts));
12084         if (!is_error(ret)) {
12085             ret = host_to_target_timespec(arg2, &ts);
12086         }
12087         return ret;
12088     }
12089 #endif
12090 #ifdef TARGET_NR_clock_gettime64
12091     case TARGET_NR_clock_gettime64:
12092     {
12093         struct timespec ts;
12094         ret = get_errno(clock_gettime(arg1, &ts));
12095         if (!is_error(ret)) {
12096             ret = host_to_target_timespec64(arg2, &ts);
12097         }
12098         return ret;
12099     }
12100 #endif
12101 #ifdef TARGET_NR_clock_getres
12102     case TARGET_NR_clock_getres:
12103     {
12104         struct timespec ts;
12105         ret = get_errno(clock_getres(arg1, &ts));
12106         if (!is_error(ret) && arg2 && host_to_target_timespec(arg2, &ts)) {
12107             return -TARGET_EFAULT;
12108         }
12109         return ret;
12110     }
12111 #endif
12112 #ifdef TARGET_NR_clock_getres_time64
12113     case TARGET_NR_clock_getres_time64:
12114     {
12115         struct timespec ts;
12116         ret = get_errno(clock_getres(arg1, &ts));
12117         if (!is_error(ret) && arg2 && host_to_target_timespec64(arg2, &ts)) {
12118             return -TARGET_EFAULT;
12119         }
12120         return ret;
12121     }
12122 #endif
12123 #ifdef TARGET_NR_clock_nanosleep
12124     case TARGET_NR_clock_nanosleep:
12125     {
12126         struct timespec ts;
12127         if (target_to_host_timespec(&ts, arg3)) {
12128             return -TARGET_EFAULT;
12129         }
12130         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12131                                              &ts, arg4 ? &ts : NULL));
12132         /*
12133          * If the call is interrupted by a signal handler, it fails with
12134          * -TARGET_EINTR.  If arg4 is non-NULL and arg2 is not TIMER_ABSTIME,
12135          * the remaining unslept time is written back to arg4.
12136          */
12137         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12138             host_to_target_timespec(arg4, &ts)) {
12139               return -TARGET_EFAULT;
12140         }
12141 
12142         return ret;
12143     }
12144 #endif
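    /*
     * Illustrative guest-side sketch (not part of the emulation) of the
     * retry loop the remaining-time write-back above exists to support:
     *
     *     struct timespec req = { .tv_sec = 1, .tv_nsec = 0 }, rem;
     *     while (clock_nanosleep(CLOCK_MONOTONIC, 0, &req, &rem) == EINTR) {
     *         req = rem;   // resume sleeping for the unslept remainder
     *     }
     */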
12145 #ifdef TARGET_NR_clock_nanosleep_time64
12146     case TARGET_NR_clock_nanosleep_time64:
12147     {
12148         struct timespec ts;
12149 
12150         if (target_to_host_timespec64(&ts, arg3)) {
12151             return -TARGET_EFAULT;
12152         }
12153 
12154         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12155                                              &ts, arg4 ? &ts : NULL));
12156 
12157         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12158             host_to_target_timespec64(arg4, &ts)) {
12159             return -TARGET_EFAULT;
12160         }
12161         return ret;
12162     }
12163 #endif
12164 
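    /*
     * set_tid_address only records where the kernel should clear the TID
     * when the thread exits, so the guest address is translated with g2h()
     * and handed straight to the host kernel, which will later write into
     * guest memory directly.
     */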
12165 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12166     case TARGET_NR_set_tid_address:
12167         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12168 #endif
12169 
12170     case TARGET_NR_tkill:
12171         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12172 
12173     case TARGET_NR_tgkill:
12174         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12175                          target_to_host_signal(arg3)));
12176 
12177 #ifdef TARGET_NR_set_robust_list
12178     case TARGET_NR_set_robust_list:
12179     case TARGET_NR_get_robust_list:
12180         /* The ABI for supporting robust futexes has userspace pass
12181          * the kernel a pointer to a linked list which is updated by
12182          * userspace after the syscall; the list is walked by the kernel
12183          * when the thread exits. Since the linked list in QEMU guest
12184          * memory isn't a valid linked list for the host and we have
12185          * no way to reliably intercept the thread-death event, we can't
12186          * support these. Silently return ENOSYS so that guest userspace
12187          * falls back to a non-robust futex implementation (which should
12188          * be OK except in the corner case of the guest crashing while
12189          * holding a mutex that is shared with another process via
12190          * shared memory).
12191          */
12192         return -TARGET_ENOSYS;
12193 #endif
12194 
12195 #if defined(TARGET_NR_utimensat)
12196     case TARGET_NR_utimensat:
12197         {
12198             struct timespec *tsp, ts[2];
12199             if (!arg3) {
12200                 tsp = NULL;
12201             } else {
12202                 if (target_to_host_timespec(ts, arg3)) {
12203                     return -TARGET_EFAULT;
12204                 }
12205                 if (target_to_host_timespec(ts + 1, arg3 +
12206                                             sizeof(struct target_timespec))) {
12207                     return -TARGET_EFAULT;
12208                 }
12209                 tsp = ts;
12210             }
12211             if (!arg2) {
12212                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12213             } else {
12214                 if (!(p = lock_user_string(arg2))) {
12215                     return -TARGET_EFAULT;
12216                 }
12217                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12218                 unlock_user(p, arg2, 0);
12219             }
12220         }
12221         return ret;
12222 #endif
12223 #ifdef TARGET_NR_utimensat_time64
12224     case TARGET_NR_utimensat_time64:
12225         {
12226             struct timespec *tsp, ts[2];
12227             if (!arg3) {
12228                 tsp = NULL;
12229             } else {
12230                 if (target_to_host_timespec64(ts, arg3)) {
12231                     return -TARGET_EFAULT;
12232                 }
12233                 if (target_to_host_timespec64(ts + 1, arg3 +
12234                                      sizeof(struct target__kernel_timespec))) {
12235                     return -TARGET_EFAULT;
12236                 }
12237                 tsp = ts;
12238             }
12239             if (!arg2) {
12240                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12241             } else {
12242                 p = lock_user_string(arg2);
12243                 if (!p) {
12244                     return -TARGET_EFAULT;
12245                 }
12246                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12247                 unlock_user(p, arg2, 0);
12248             }
12249         }
12250         return ret;
12251 #endif
12252 #ifdef TARGET_NR_futex
12253     case TARGET_NR_futex:
12254         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12255 #endif
12256 #ifdef TARGET_NR_futex_time64
12257     case TARGET_NR_futex_time64:
12258         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12259 #endif
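    /*
     * On success the new inotify descriptor is given an fd translator so
     * that struct inotify_event records read by the guest are converted
     * from the host layout to the target layout.
     */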
12260 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12261     case TARGET_NR_inotify_init:
12262         ret = get_errno(sys_inotify_init());
12263         if (ret >= 0) {
12264             fd_trans_register(ret, &target_inotify_trans);
12265         }
12266         return ret;
12267 #endif
12268 #ifdef CONFIG_INOTIFY1
12269 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12270     case TARGET_NR_inotify_init1:
12271         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12272                                           fcntl_flags_tbl)));
12273         if (ret >= 0) {
12274             fd_trans_register(ret, &target_inotify_trans);
12275         }
12276         return ret;
12277 #endif
12278 #endif
12279 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12280     case TARGET_NR_inotify_add_watch:
12281         p = lock_user_string(arg2);
12282         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12283         unlock_user(p, arg2, 0);
12284         return ret;
12285 #endif
12286 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12287     case TARGET_NR_inotify_rm_watch:
12288         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12289 #endif
12290 
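    /*
     * POSIX message queues: struct mq_attr is converted between the target
     * and host layouts with copy_from_user_mq_attr()/copy_to_user_mq_attr(),
     * and the timed send/receive variants also convert the optional
     * absolute timeout.
     */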
12291 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12292     case TARGET_NR_mq_open:
12293         {
12294             struct mq_attr posix_mq_attr;
12295             struct mq_attr *pposix_mq_attr;
12296             int host_flags;
12297 
12298             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12299             pposix_mq_attr = NULL;
12300             if (arg4) {
12301                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12302                     return -TARGET_EFAULT;
12303                 }
12304                 pposix_mq_attr = &posix_mq_attr;
12305             }
12306             p = lock_user_string(arg1 - 1);
12307             if (!p) {
12308                 return -TARGET_EFAULT;
12309             }
12310             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12311             unlock_user (p, arg1, 0);
12312         }
12313         return ret;
12314 
12315     case TARGET_NR_mq_unlink:
12316         p = lock_user_string(arg1 - 1);
12317         if (!p) {
12318             return -TARGET_EFAULT;
12319         }
12320         ret = get_errno(mq_unlink(p));
12321         unlock_user (p, arg1, 0);
12322         return ret;
12323 
12324 #ifdef TARGET_NR_mq_timedsend
12325     case TARGET_NR_mq_timedsend:
12326         {
12327             struct timespec ts;
12328 
12329             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12330             if (arg5 != 0) {
12331                 if (target_to_host_timespec(&ts, arg5)) {
12332                     return -TARGET_EFAULT;
12333                 }
12334                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12335                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12336                     return -TARGET_EFAULT;
12337                 }
12338             } else {
12339                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12340             }
12341             unlock_user (p, arg2, arg3);
12342         }
12343         return ret;
12344 #endif
12345 #ifdef TARGET_NR_mq_timedsend_time64
12346     case TARGET_NR_mq_timedsend_time64:
12347         {
12348             struct timespec ts;
12349 
12350             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12351             if (arg5 != 0) {
12352                 if (target_to_host_timespec64(&ts, arg5)) {
12353                     return -TARGET_EFAULT;
12354                 }
12355                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12356                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12357                     return -TARGET_EFAULT;
12358                 }
12359             } else {
12360                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12361             }
12362             unlock_user(p, arg2, arg3);
12363         }
12364         return ret;
12365 #endif
12366 
12367 #ifdef TARGET_NR_mq_timedreceive
12368     case TARGET_NR_mq_timedreceive:
12369         {
12370             struct timespec ts;
12371             unsigned int prio;
12372 
12373             p = lock_user (VERIFY_READ, arg2, arg3, 1);
12374             if (arg5 != 0) {
12375                 if (target_to_host_timespec(&ts, arg5)) {
12376                     return -TARGET_EFAULT;
12377                 }
12378                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12379                                                      &prio, &ts));
12380                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12381                     return -TARGET_EFAULT;
12382                 }
12383             } else {
12384                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12385                                                      &prio, NULL));
12386             }
12387             unlock_user (p, arg2, arg3);
12388             if (arg4 != 0)
12389                 put_user_u32(prio, arg4);
12390         }
12391         return ret;
12392 #endif
12393 #ifdef TARGET_NR_mq_timedreceive_time64
12394     case TARGET_NR_mq_timedreceive_time64:
12395         {
12396             struct timespec ts;
12397             unsigned int prio;
12398 
12399             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12400             if (arg5 != 0) {
12401                 if (target_to_host_timespec64(&ts, arg5)) {
12402                     return -TARGET_EFAULT;
12403                 }
12404                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12405                                                      &prio, &ts));
12406                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12407                     return -TARGET_EFAULT;
12408                 }
12409             } else {
12410                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12411                                                      &prio, NULL));
12412             }
12413             unlock_user(p, arg2, arg3);
12414             if (arg4 != 0) {
12415                 put_user_u32(prio, arg4);
12416             }
12417         }
12418         return ret;
12419 #endif
12420 
12421     /* Not implemented for now... */
12422 /*     case TARGET_NR_mq_notify: */
12423 /*         break; */
12424 
12425     case TARGET_NR_mq_getsetattr:
12426         {
12427             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12428             ret = 0;
12429             if (arg2 != 0) {
12430                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12431                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12432                                            &posix_mq_attr_out));
12433             } else if (arg3 != 0) {
12434                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12435             }
12436             if (ret == 0 && arg3 != 0) {
12437                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12438             }
12439         }
12440         return ret;
12441 #endif
12442 
12443 #ifdef CONFIG_SPLICE
12444 #ifdef TARGET_NR_tee
12445     case TARGET_NR_tee:
12446         {
12447             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12448         }
12449         return ret;
12450 #endif
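    /*
     * For splice the optional in/out offsets are 64-bit values in guest
     * memory: copy them in before the call and write the updated offsets
     * back afterwards.
     */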
12451 #ifdef TARGET_NR_splice
12452     case TARGET_NR_splice:
12453         {
12454             loff_t loff_in, loff_out;
12455             loff_t *ploff_in = NULL, *ploff_out = NULL;
12456             if (arg2) {
12457                 if (get_user_u64(loff_in, arg2)) {
12458                     return -TARGET_EFAULT;
12459                 }
12460                 ploff_in = &loff_in;
12461             }
12462             if (arg4) {
12463                 if (get_user_u64(loff_out, arg4)) {
12464                     return -TARGET_EFAULT;
12465                 }
12466                 ploff_out = &loff_out;
12467             }
12468             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12469             if (arg2) {
12470                 if (put_user_u64(loff_in, arg2)) {
12471                     return -TARGET_EFAULT;
12472                 }
12473             }
12474             if (arg4) {
12475                 if (put_user_u64(loff_out, arg4)) {
12476                     return -TARGET_EFAULT;
12477                 }
12478             }
12479         }
12480         return ret;
12481 #endif
12482 #ifdef TARGET_NR_vmsplice
12483     case TARGET_NR_vmsplice:
12484         {
12485             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12486             if (vec != NULL) {
12487                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12488                 unlock_iovec(vec, arg2, arg3, 0);
12489             } else {
12490                 ret = -host_to_target_errno(errno);
12491             }
12492         }
12493         return ret;
12494 #endif
12495 #endif /* CONFIG_SPLICE */
12496 #ifdef CONFIG_EVENTFD
12497 #if defined(TARGET_NR_eventfd)
12498     case TARGET_NR_eventfd:
12499         ret = get_errno(eventfd(arg1, 0));
12500         if (ret >= 0) {
12501             fd_trans_register(ret, &target_eventfd_trans);
12502         }
12503         return ret;
12504 #endif
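    /*
     * eventfd2 flags are remapped by hand because EFD_NONBLOCK/EFD_CLOEXEC
     * share their values with O_NONBLOCK/O_CLOEXEC, which differ between
     * target and host ABIs.
     */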
12505 #if defined(TARGET_NR_eventfd2)
12506     case TARGET_NR_eventfd2:
12507     {
12508         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12509         if (arg2 & TARGET_O_NONBLOCK) {
12510             host_flags |= O_NONBLOCK;
12511         }
12512         if (arg2 & TARGET_O_CLOEXEC) {
12513             host_flags |= O_CLOEXEC;
12514         }
12515         ret = get_errno(eventfd(arg1, host_flags));
12516         if (ret >= 0) {
12517             fd_trans_register(ret, &target_eventfd_trans);
12518         }
12519         return ret;
12520     }
12521 #endif
12522 #endif /* CONFIG_EVENTFD  */
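    /*
     * On 32-bit ABIs the 64-bit offset and length arguments arrive split
     * across register pairs and are reassembled with target_offset64();
     * the sync_file_range cases below handle their per-target argument
     * layouts the same way.
     */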
12523 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12524     case TARGET_NR_fallocate:
12525 #if TARGET_ABI_BITS == 32
12526         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12527                                   target_offset64(arg5, arg6)));
12528 #else
12529         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12530 #endif
12531         return ret;
12532 #endif
12533 #if defined(CONFIG_SYNC_FILE_RANGE)
12534 #if defined(TARGET_NR_sync_file_range)
12535     case TARGET_NR_sync_file_range:
12536 #if TARGET_ABI_BITS == 32
12537 #if defined(TARGET_MIPS)
12538         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12539                                         target_offset64(arg5, arg6), arg7));
12540 #else
12541         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12542                                         target_offset64(arg4, arg5), arg6));
12543 #endif /* !TARGET_MIPS */
12544 #else
12545         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12546 #endif
12547         return ret;
12548 #endif
12549 #if defined(TARGET_NR_sync_file_range2) || \
12550     defined(TARGET_NR_arm_sync_file_range)
12551 #if defined(TARGET_NR_sync_file_range2)
12552     case TARGET_NR_sync_file_range2:
12553 #endif
12554 #if defined(TARGET_NR_arm_sync_file_range)
12555     case TARGET_NR_arm_sync_file_range:
12556 #endif
12557         /* This is like sync_file_range but the arguments are reordered */
12558 #if TARGET_ABI_BITS == 32
12559         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12560                                         target_offset64(arg5, arg6), arg2));
12561 #else
12562         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12563 #endif
12564         return ret;
12565 #endif
12566 #endif
12567 #if defined(TARGET_NR_signalfd4)
12568     case TARGET_NR_signalfd4:
12569         return do_signalfd4(arg1, arg2, arg4);
12570 #endif
12571 #if defined(TARGET_NR_signalfd)
12572     case TARGET_NR_signalfd:
12573         return do_signalfd4(arg1, arg2, 0);
12574 #endif
12575 #if defined(CONFIG_EPOLL)
12576 #if defined(TARGET_NR_epoll_create)
12577     case TARGET_NR_epoll_create:
12578         return get_errno(epoll_create(arg1));
12579 #endif
12580 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12581     case TARGET_NR_epoll_create1:
12582         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12583 #endif
12584 #if defined(TARGET_NR_epoll_ctl)
12585     case TARGET_NR_epoll_ctl:
12586     {
12587         struct epoll_event ep;
12588         struct epoll_event *epp = 0;
12589         if (arg4) {
12590             if (arg2 != EPOLL_CTL_DEL) {
12591                 struct target_epoll_event *target_ep;
12592                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12593                     return -TARGET_EFAULT;
12594                 }
12595                 ep.events = tswap32(target_ep->events);
12596                 /*
12597                  * The epoll_data_t union is just opaque data to the kernel,
12598                  * so we transfer all 64 bits across and need not worry what
12599                  * actual data type it is.
12600                  */
12601                 ep.data.u64 = tswap64(target_ep->data.u64);
12602                 unlock_user_struct(target_ep, arg4, 0);
12603             }
12604             /*
12605              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required
12606              * a non-null event pointer, even though that argument is
12607              * ignored.
12608              */
12609             epp = &ep;
12610         }
12611         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12612     }
12613 #endif
12614 
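    /*
     * epoll_wait/epoll_pwait: events are collected into a host-side array
     * and converted back to the target layout (32-bit events plus the
     * opaque 64-bit data) before being copied out; epoll_pwait additionally
     * converts the optional signal mask.
     */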
12615 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12616 #if defined(TARGET_NR_epoll_wait)
12617     case TARGET_NR_epoll_wait:
12618 #endif
12619 #if defined(TARGET_NR_epoll_pwait)
12620     case TARGET_NR_epoll_pwait:
12621 #endif
12622     {
12623         struct target_epoll_event *target_ep;
12624         struct epoll_event *ep;
12625         int epfd = arg1;
12626         int maxevents = arg3;
12627         int timeout = arg4;
12628 
12629         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12630             return -TARGET_EINVAL;
12631         }
12632 
12633         target_ep = lock_user(VERIFY_WRITE, arg2,
12634                               maxevents * sizeof(struct target_epoll_event), 1);
12635         if (!target_ep) {
12636             return -TARGET_EFAULT;
12637         }
12638 
12639         ep = g_try_new(struct epoll_event, maxevents);
12640         if (!ep) {
12641             unlock_user(target_ep, arg2, 0);
12642             return -TARGET_ENOMEM;
12643         }
12644 
12645         switch (num) {
12646 #if defined(TARGET_NR_epoll_pwait)
12647         case TARGET_NR_epoll_pwait:
12648         {
12649             target_sigset_t *target_set;
12650             sigset_t _set, *set = &_set;
12651 
12652             if (arg5) {
12653                 if (arg6 != sizeof(target_sigset_t)) {
12654                     ret = -TARGET_EINVAL;
12655                     break;
12656                 }
12657 
12658                 target_set = lock_user(VERIFY_READ, arg5,
12659                                        sizeof(target_sigset_t), 1);
12660                 if (!target_set) {
12661                     ret = -TARGET_EFAULT;
12662                     break;
12663                 }
12664                 target_to_host_sigset(set, target_set);
12665                 unlock_user(target_set, arg5, 0);
12666             } else {
12667                 set = NULL;
12668             }
12669 
12670             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12671                                              set, SIGSET_T_SIZE));
12672             break;
12673         }
12674 #endif
12675 #if defined(TARGET_NR_epoll_wait)
12676         case TARGET_NR_epoll_wait:
12677             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12678                                              NULL, 0));
12679             break;
12680 #endif
12681         default:
12682             ret = -TARGET_ENOSYS;
12683         }
12684         if (!is_error(ret)) {
12685             int i;
12686             for (i = 0; i < ret; i++) {
12687                 target_ep[i].events = tswap32(ep[i].events);
12688                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12689             }
12690             unlock_user(target_ep, arg2,
12691                         ret * sizeof(struct target_epoll_event));
12692         } else {
12693             unlock_user(target_ep, arg2, 0);
12694         }
12695         g_free(ep);
12696         return ret;
12697     }
12698 #endif
12699 #endif
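    /*
     * As with setrlimit, new limits for the memory related resources
     * (RLIMIT_AS, RLIMIT_DATA, RLIMIT_STACK) are deliberately not applied
     * on the host, since they would constrain QEMU itself rather than the
     * emulated guest.
     */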
12700 #ifdef TARGET_NR_prlimit64
12701     case TARGET_NR_prlimit64:
12702     {
12703         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12704         struct target_rlimit64 *target_rnew, *target_rold;
12705         struct host_rlimit64 rnew, rold, *rnewp = 0;
12706         int resource = target_to_host_resource(arg2);
12707 
12708         if (arg3 && (resource != RLIMIT_AS &&
12709                      resource != RLIMIT_DATA &&
12710                      resource != RLIMIT_STACK)) {
12711             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12712                 return -TARGET_EFAULT;
12713             }
12714             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12715             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12716             unlock_user_struct(target_rnew, arg3, 0);
12717             rnewp = &rnew;
12718         }
12719 
12720         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12721         if (!is_error(ret) && arg4) {
12722             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12723                 return -TARGET_EFAULT;
12724             }
12725             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12726             target_rold->rlim_max = tswap64(rold.rlim_max);
12727             unlock_user_struct(target_rold, arg4, 1);
12728         }
12729         return ret;
12730     }
12731 #endif
12732 #ifdef TARGET_NR_gethostname
12733     case TARGET_NR_gethostname:
12734     {
12735         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12736         if (name) {
12737             ret = get_errno(gethostname(name, arg2));
12738             unlock_user(name, arg1, arg2);
12739         } else {
12740             ret = -TARGET_EFAULT;
12741         }
12742         return ret;
12743     }
12744 #endif
12745 #ifdef TARGET_NR_atomic_cmpxchg_32
12746     case TARGET_NR_atomic_cmpxchg_32:
12747     {
12748         /* should use start_exclusive from main.c */
12749         abi_ulong mem_value;
12750         if (get_user_u32(mem_value, arg6)) {
12751             target_siginfo_t info;
12752             info.si_signo = SIGSEGV;
12753             info.si_errno = 0;
12754             info.si_code = TARGET_SEGV_MAPERR;
12755             info._sifields._sigfault._addr = arg6;
12756             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12757                          QEMU_SI_FAULT, &info);
12758             ret = 0xdeadbeef;
12759             return ret;
12760         }
12761         if (mem_value == arg2)
12762             put_user_u32(arg1, arg6);
12763         return mem_value;
12764     }
12765 #endif
12766 #ifdef TARGET_NR_atomic_barrier
12767     case TARGET_NR_atomic_barrier:
12768         /* As in the kernel implementation and the QEMU ARM
12769            barrier, treat this as a no-op. */
12770         return 0;
12771 #endif
12772 
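    /*
     * Guest timer ids encode an index into g_posix_timers[] OR'ed with
     * TIMER_MAGIC; get_timer_id() checks the magic and recovers the index
     * for the other timer_*() syscalls below.
     */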
12773 #ifdef TARGET_NR_timer_create
12774     case TARGET_NR_timer_create:
12775     {
12776         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12777 
12778         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12779 
12780         int clkid = arg1;
12781         int timer_index = next_free_host_timer();
12782 
12783         if (timer_index < 0) {
12784             ret = -TARGET_EAGAIN;
12785         } else {
12786             timer_t *phtimer = g_posix_timers + timer_index;
12787 
12788             if (arg2) {
12789                 phost_sevp = &host_sevp;
12790                 ret = target_to_host_sigevent(phost_sevp, arg2);
12791                 if (ret != 0) {
12792                     return ret;
12793                 }
12794             }
12795 
12796             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12797             if (ret) {
12798                 phtimer = NULL;
12799             } else {
12800                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12801                     return -TARGET_EFAULT;
12802                 }
12803             }
12804         }
12805         return ret;
12806     }
12807 #endif
12808 
12809 #ifdef TARGET_NR_timer_settime
12810     case TARGET_NR_timer_settime:
12811     {
12812         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12813          * struct itimerspec * old_value */
12814         target_timer_t timerid = get_timer_id(arg1);
12815 
12816         if (timerid < 0) {
12817             ret = timerid;
12818         } else if (arg3 == 0) {
12819             ret = -TARGET_EINVAL;
12820         } else {
12821             timer_t htimer = g_posix_timers[timerid];
12822             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12823 
12824             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12825                 return -TARGET_EFAULT;
12826             }
12827             ret = get_errno(
12828                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12829             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12830                 return -TARGET_EFAULT;
12831             }
12832         }
12833         return ret;
12834     }
12835 #endif
12836 
12837 #ifdef TARGET_NR_timer_settime64
12838     case TARGET_NR_timer_settime64:
12839     {
12840         target_timer_t timerid = get_timer_id(arg1);
12841 
12842         if (timerid < 0) {
12843             ret = timerid;
12844         } else if (arg3 == 0) {
12845             ret = -TARGET_EINVAL;
12846         } else {
12847             timer_t htimer = g_posix_timers[timerid];
12848             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12849 
12850             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12851                 return -TARGET_EFAULT;
12852             }
12853             ret = get_errno(
12854                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12855             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12856                 return -TARGET_EFAULT;
12857             }
12858         }
12859         return ret;
12860     }
12861 #endif
12862 
12863 #ifdef TARGET_NR_timer_gettime
12864     case TARGET_NR_timer_gettime:
12865     {
12866         /* args: timer_t timerid, struct itimerspec *curr_value */
12867         target_timer_t timerid = get_timer_id(arg1);
12868 
12869         if (timerid < 0) {
12870             ret = timerid;
12871         } else if (!arg2) {
12872             ret = -TARGET_EFAULT;
12873         } else {
12874             timer_t htimer = g_posix_timers[timerid];
12875             struct itimerspec hspec;
12876             ret = get_errno(timer_gettime(htimer, &hspec));
12877 
12878             if (host_to_target_itimerspec(arg2, &hspec)) {
12879                 ret = -TARGET_EFAULT;
12880             }
12881         }
12882         return ret;
12883     }
12884 #endif
12885 
12886 #ifdef TARGET_NR_timer_gettime64
12887     case TARGET_NR_timer_gettime64:
12888     {
12889         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12890         target_timer_t timerid = get_timer_id(arg1);
12891 
12892         if (timerid < 0) {
12893             ret = timerid;
12894         } else if (!arg2) {
12895             ret = -TARGET_EFAULT;
12896         } else {
12897             timer_t htimer = g_posix_timers[timerid];
12898             struct itimerspec hspec;
12899             ret = get_errno(timer_gettime(htimer, &hspec));
12900 
12901             if (host_to_target_itimerspec64(arg2, &hspec)) {
12902                 ret = -TARGET_EFAULT;
12903             }
12904         }
12905         return ret;
12906     }
12907 #endif
12908 
12909 #ifdef TARGET_NR_timer_getoverrun
12910     case TARGET_NR_timer_getoverrun:
12911     {
12912         /* args: timer_t timerid */
12913         target_timer_t timerid = get_timer_id(arg1);
12914 
12915         if (timerid < 0) {
12916             ret = timerid;
12917         } else {
12918             timer_t htimer = g_posix_timers[timerid];
12919             ret = get_errno(timer_getoverrun(htimer));
12920         }
12921         return ret;
12922     }
12923 #endif
12924 
12925 #ifdef TARGET_NR_timer_delete
12926     case TARGET_NR_timer_delete:
12927     {
12928         /* args: timer_t timerid */
12929         target_timer_t timerid = get_timer_id(arg1);
12930 
12931         if (timerid < 0) {
12932             ret = timerid;
12933         } else {
12934             timer_t htimer = g_posix_timers[timerid];
12935             ret = get_errno(timer_delete(htimer));
12936             g_posix_timers[timerid] = 0;
12937         }
12938         return ret;
12939     }
12940 #endif
12941 
12942 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12943     case TARGET_NR_timerfd_create:
12944         return get_errno(timerfd_create(arg1,
12945                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12946 #endif
12947 
12948 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12949     case TARGET_NR_timerfd_gettime:
12950         {
12951             struct itimerspec its_curr;
12952 
12953             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12954 
12955             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12956                 return -TARGET_EFAULT;
12957             }
12958         }
12959         return ret;
12960 #endif
12961 
12962 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12963     case TARGET_NR_timerfd_gettime64:
12964         {
12965             struct itimerspec its_curr;
12966 
12967             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12968 
12969             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12970                 return -TARGET_EFAULT;
12971             }
12972         }
12973         return ret;
12974 #endif
12975 
12976 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12977     case TARGET_NR_timerfd_settime:
12978         {
12979             struct itimerspec its_new, its_old, *p_new;
12980 
12981             if (arg3) {
12982                 if (target_to_host_itimerspec(&its_new, arg3)) {
12983                     return -TARGET_EFAULT;
12984                 }
12985                 p_new = &its_new;
12986             } else {
12987                 p_new = NULL;
12988             }
12989 
12990             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12991 
12992             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12993                 return -TARGET_EFAULT;
12994             }
12995         }
12996         return ret;
12997 #endif
12998 
12999 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13000     case TARGET_NR_timerfd_settime64:
13001         {
13002             struct itimerspec its_new, its_old, *p_new;
13003 
13004             if (arg3) {
13005                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13006                     return -TARGET_EFAULT;
13007                 }
13008                 p_new = &its_new;
13009             } else {
13010                 p_new = NULL;
13011             }
13012 
13013             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13014 
13015             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13016                 return -TARGET_EFAULT;
13017             }
13018         }
13019         return ret;
13020 #endif
13021 
13022 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13023     case TARGET_NR_ioprio_get:
13024         return get_errno(ioprio_get(arg1, arg2));
13025 #endif
13026 
13027 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13028     case TARGET_NR_ioprio_set:
13029         return get_errno(ioprio_set(arg1, arg2, arg3));
13030 #endif
13031 
13032 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13033     case TARGET_NR_setns:
13034         return get_errno(setns(arg1, arg2));
13035 #endif
13036 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13037     case TARGET_NR_unshare:
13038         return get_errno(unshare(arg1));
13039 #endif
13040 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13041     case TARGET_NR_kcmp:
13042         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13043 #endif
13044 #ifdef TARGET_NR_swapcontext
13045     case TARGET_NR_swapcontext:
13046         /* PowerPC specific.  */
13047         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13048 #endif
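    /*
     * fd_trans_unregister() drops any translator still registered for an
     * earlier, already-closed descriptor that happened to have the same
     * number as the new memfd.
     */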
13049 #ifdef TARGET_NR_memfd_create
13050     case TARGET_NR_memfd_create:
13051         p = lock_user_string(arg1);
13052         if (!p) {
13053             return -TARGET_EFAULT;
13054         }
13055         ret = get_errno(memfd_create(p, arg2));
13056         fd_trans_unregister(ret);
13057         unlock_user(p, arg1, 0);
13058         return ret;
13059 #endif
13060 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
13061     case TARGET_NR_membarrier:
13062         return get_errno(membarrier(arg1, arg2));
13063 #endif
13064 
13065 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13066     case TARGET_NR_copy_file_range:
13067         {
13068             loff_t inoff, outoff;
13069             loff_t *pinoff = NULL, *poutoff = NULL;
13070 
13071             if (arg2) {
13072                 if (get_user_u64(inoff, arg2)) {
13073                     return -TARGET_EFAULT;
13074                 }
13075                 pinoff = &inoff;
13076             }
13077             if (arg4) {
13078                 if (get_user_u64(outoff, arg4)) {
13079                     return -TARGET_EFAULT;
13080                 }
13081                 poutoff = &outoff;
13082             }
13083             /* Do not sign-extend the count parameter. */
13084             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13085                                                  (abi_ulong)arg5, arg6));
13086             if (!is_error(ret) && ret > 0) {
13087                 if (arg2) {
13088                     if (put_user_u64(inoff, arg2)) {
13089                         return -TARGET_EFAULT;
13090                     }
13091                 }
13092                 if (arg4) {
13093                     if (put_user_u64(outoff, arg4)) {
13094                         return -TARGET_EFAULT;
13095                     }
13096                 }
13097             }
13098         }
13099         return ret;
13100 #endif
13101 
13102 #if defined(TARGET_NR_pivot_root)
13103     case TARGET_NR_pivot_root:
13104         {
13105             void *p2;
13106             p = lock_user_string(arg1); /* new_root */
13107             p2 = lock_user_string(arg2); /* put_old */
13108             if (!p || !p2) {
13109                 ret = -TARGET_EFAULT;
13110             } else {
13111                 ret = get_errno(pivot_root(p, p2));
13112             }
13113             unlock_user(p2, arg2, 0);
13114             unlock_user(p, arg1, 0);
13115         }
13116         return ret;
13117 #endif
13118 
13119     default:
13120         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13121         return -TARGET_ENOSYS;
13122     }
13123     return ret;
13124 }
13125 
13126 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13127                     abi_long arg2, abi_long arg3, abi_long arg4,
13128                     abi_long arg5, abi_long arg6, abi_long arg7,
13129                     abi_long arg8)
13130 {
13131     CPUState *cpu = env_cpu(cpu_env);
13132     abi_long ret;
13133 
13134 #ifdef DEBUG_ERESTARTSYS
13135     /* Debug-only code for exercising the syscall-restart code paths
13136      * in the per-architecture cpu main loops: restart every syscall
13137      * the guest makes once before letting it through.
13138      */
13139     {
13140         static bool flag;
13141         flag = !flag;
13142         if (flag) {
13143             return -TARGET_ERESTARTSYS;
13144         }
13145     }
13146 #endif
13147 
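    /*
     * record_syscall_start()/record_syscall_return() feed the TCG plugin
     * syscall hooks, while the LOG_STRACE checks below provide -strace
     * style logging of each call and its result.
     */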
13148     record_syscall_start(cpu, num, arg1,
13149                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13150 
13151     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13152         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13153     }
13154 
13155     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13156                       arg5, arg6, arg7, arg8);
13157 
13158     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13159         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13160                           arg3, arg4, arg5, arg6);
13161     }
13162 
13163     record_syscall_return(cpu, num, ret);
13164     return ret;
13165 }
13166