xref: /openbmc/qemu/linux-user/syscall.c (revision a57e0c36)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "strace.h"
131 #include "signal-common.h"
132 #include "loader.h"
133 #include "user-mmap.h"
134 #include "safe-syscall.h"
135 #include "qemu/guest-random.h"
136 #include "qemu/selfmap.h"
137 #include "user/syscall-trace.h"
138 #include "qapi/error.h"
139 #include "fd-trans.h"
140 #include "tcg/tcg.h"
141 
142 #ifndef CLONE_IO
143 #define CLONE_IO                0x80000000      /* Clone io context */
144 #endif
145 
146 /* We can't directly call the host clone syscall, because this will
147  * badly confuse libc (breaking mutexes, for example). So we must
148  * divide clone flags into:
149  *  * flag combinations that look like pthread_create()
150  *  * flag combinations that look like fork()
151  *  * flags we can implement within QEMU itself
152  *  * flags we can't support and will return an error for
153  */
154 /* For thread creation, all these flags must be present; for
155  * fork, none must be present.
156  */
157 #define CLONE_THREAD_FLAGS                              \
158     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
159      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
160 
161 /* These flags are ignored:
162  * CLONE_DETACHED is now ignored by the kernel;
163  * CLONE_IO is just an optimisation hint to the I/O scheduler
164  */
165 #define CLONE_IGNORED_FLAGS                     \
166     (CLONE_DETACHED | CLONE_IO)
167 
168 /* Flags for fork which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_FORK_FLAGS               \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
172 
173 /* Flags for thread creation which we can implement within QEMU itself */
174 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
175     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
176      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
177 
178 #define CLONE_INVALID_FORK_FLAGS                                        \
179     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
180 
181 #define CLONE_INVALID_THREAD_FLAGS                                      \
182     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
183        CLONE_IGNORED_FLAGS))
184 
185 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
186  * have almost all been allocated. We cannot support any of
187  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
188  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
189  * The checks against the invalid thread masks above will catch these.
190  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
191  */
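
/*
 * Sketch of how these masks are intended to be used (illustrative only;
 * the actual checks are performed in do_fork() later in this file):
 *
 *     if ((flags & CLONE_THREAD_FLAGS) == CLONE_THREAD_FLAGS) {
 *         // thread-like: reject anything in CLONE_INVALID_THREAD_FLAGS
 *     } else {
 *         // fork-like: reject anything in CLONE_INVALID_FORK_FLAGS
 *     }
 */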
192 
193 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
194  * once. This exercises the codepaths for restart.
195  */
196 //#define DEBUG_ERESTARTSYS
197 
198 //#include <linux/msdos_fs.h>
199 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
200 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
201 
202 #undef _syscall0
203 #undef _syscall1
204 #undef _syscall2
205 #undef _syscall3
206 #undef _syscall4
207 #undef _syscall5
208 #undef _syscall6
209 
210 #define _syscall0(type,name)		\
211 static type name (void)			\
212 {					\
213 	return syscall(__NR_##name);	\
214 }
215 
216 #define _syscall1(type,name,type1,arg1)		\
217 static type name (type1 arg1)			\
218 {						\
219 	return syscall(__NR_##name, arg1);	\
220 }
221 
222 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
223 static type name (type1 arg1,type2 arg2)		\
224 {							\
225 	return syscall(__NR_##name, arg1, arg2);	\
226 }
227 
228 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
229 static type name (type1 arg1,type2 arg2,type3 arg3)		\
230 {								\
231 	return syscall(__NR_##name, arg1, arg2, arg3);		\
232 }
233 
234 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
235 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
236 {										\
237 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
238 }
239 
240 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
241 		  type5,arg5)							\
242 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
243 {										\
244 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
245 }
246 
247 
248 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
249 		  type5,arg5,type6,arg6)					\
250 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
251                   type6 arg6)							\
252 {										\
253 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
254 }
255 
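/*
 * As an example, the invocation used later in this file,
 *
 *     _syscall1(int, exit_group, int, error_code)
 *
 * expands to a static wrapper roughly equivalent to:
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 */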
256 
257 #define __NR_sys_uname __NR_uname
258 #define __NR_sys_getcwd1 __NR_getcwd
259 #define __NR_sys_getdents __NR_getdents
260 #define __NR_sys_getdents64 __NR_getdents64
261 #define __NR_sys_getpriority __NR_getpriority
262 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
263 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
264 #define __NR_sys_syslog __NR_syslog
265 #if defined(__NR_futex)
266 # define __NR_sys_futex __NR_futex
267 #endif
268 #if defined(__NR_futex_time64)
269 # define __NR_sys_futex_time64 __NR_futex_time64
270 #endif
271 #define __NR_sys_inotify_init __NR_inotify_init
272 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
273 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
274 #define __NR_sys_statx __NR_statx
275 
276 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
277 #define __NR__llseek __NR_lseek
278 #endif
279 
280 /* Newer kernel ports have llseek() instead of _llseek() */
281 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
282 #define TARGET_NR__llseek TARGET_NR_llseek
283 #endif
284 
285 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
286 #ifndef TARGET_O_NONBLOCK_MASK
287 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
288 #endif
289 
290 #define __NR_sys_gettid __NR_gettid
291 _syscall0(int, sys_gettid)
292 
293 /* For the 64-bit guest on 32-bit host case we must emulate
294  * getdents using getdents64, because otherwise the host
295  * might hand us back more dirent records than we can fit
296  * into the guest buffer after structure format conversion.
297  * Otherwise we emulate the guest getdents using the host getdents, if the host has it.
298  */
299 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
300 #define EMULATE_GETDENTS_WITH_GETDENTS
301 #endif
302 
303 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
304 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
305 #endif
306 #if (defined(TARGET_NR_getdents) && \
307       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
308     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
309 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
310 #endif
311 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
312 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
313           loff_t *, res, uint, wh);
314 #endif
315 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
316 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
317           siginfo_t *, uinfo)
318 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
319 #ifdef __NR_exit_group
320 _syscall1(int,exit_group,int,error_code)
321 #endif
322 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
323 _syscall1(int,set_tid_address,int *,tidptr)
324 #endif
325 #if defined(__NR_futex)
326 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
327           const struct timespec *,timeout,int *,uaddr2,int,val3)
328 #endif
329 #if defined(__NR_futex_time64)
330 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
331           const struct timespec *,timeout,int *,uaddr2,int,val3)
332 #endif
333 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
334 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
335           unsigned long *, user_mask_ptr);
336 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
337 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
338           unsigned long *, user_mask_ptr);
339 #define __NR_sys_getcpu __NR_getcpu
340 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
341 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
342           void *, arg);
343 _syscall2(int, capget, struct __user_cap_header_struct *, header,
344           struct __user_cap_data_struct *, data);
345 _syscall2(int, capset, struct __user_cap_header_struct *, header,
346           struct __user_cap_data_struct *, data);
347 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
348 _syscall2(int, ioprio_get, int, which, int, who)
349 #endif
350 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
351 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
352 #endif
353 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
354 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
355 #endif
356 
357 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
358 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
359           unsigned long, idx1, unsigned long, idx2)
360 #endif
361 
362 /*
363  * It is assumed that struct statx is architecture independent.
364  */
365 #if defined(TARGET_NR_statx) && defined(__NR_statx)
366 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
367           unsigned int, mask, struct target_statx *, statxbuf)
368 #endif
369 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
370 _syscall2(int, membarrier, int, cmd, int, flags)
371 #endif
372 
373 static const bitmask_transtbl fcntl_flags_tbl[] = {
374   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
375   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
376   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
377   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
378   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
379   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
380   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
381   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
382   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
383   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
384   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
385   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
386   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
387 #if defined(O_DIRECT)
388   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
389 #endif
390 #if defined(O_NOATIME)
391   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
392 #endif
393 #if defined(O_CLOEXEC)
394   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
395 #endif
396 #if defined(O_PATH)
397   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
398 #endif
399 #if defined(O_TMPFILE)
400   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
401 #endif
402   /* Don't terminate the list prematurely on 64-bit host+guest.  */
403 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
404   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
405 #endif
406   { 0, 0, 0, 0 }
407 };
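
/*
 * A bitmask_transtbl like this is consumed by the generic flag-translation
 * helpers, e.g. (sketch; "target_flags" stands for a guest-supplied value):
 *
 *     int host_flags = target_to_host_bitmask(target_flags, fcntl_flags_tbl);
 *
 * which maps each guest O_* value onto the corresponding host O_* value,
 * with host_to_target_bitmask() doing the reverse conversion.
 */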
408 
409 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
410 
411 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
412 #if defined(__NR_utimensat)
413 #define __NR_sys_utimensat __NR_utimensat
414 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
415           const struct timespec *,tsp,int,flags)
416 #else
417 static int sys_utimensat(int dirfd, const char *pathname,
418                          const struct timespec times[2], int flags)
419 {
420     errno = ENOSYS;
421     return -1;
422 }
423 #endif
424 #endif /* TARGET_NR_utimensat */
425 
426 #ifdef TARGET_NR_renameat2
427 #if defined(__NR_renameat2)
428 #define __NR_sys_renameat2 __NR_renameat2
429 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
430           const char *, new, unsigned int, flags)
431 #else
432 static int sys_renameat2(int oldfd, const char *old,
433                          int newfd, const char *new, int flags)
434 {
435     if (flags == 0) {
436         return renameat(oldfd, old, newfd, new);
437     }
438     errno = ENOSYS;
439     return -1;
440 }
441 #endif
442 #endif /* TARGET_NR_renameat2 */
443 
444 #ifdef CONFIG_INOTIFY
445 #include <sys/inotify.h>
446 
447 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
448 static int sys_inotify_init(void)
449 {
450   return (inotify_init());
451 }
452 #endif
453 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
454 static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
455 {
456   return (inotify_add_watch(fd, pathname, mask));
457 }
458 #endif
459 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
460 static int sys_inotify_rm_watch(int fd, int32_t wd)
461 {
462   return (inotify_rm_watch(fd, wd));
463 }
464 #endif
465 #ifdef CONFIG_INOTIFY1
466 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
467 static int sys_inotify_init1(int flags)
468 {
469   return (inotify_init1(flags));
470 }
471 #endif
472 #endif
473 #else
474 /* Userspace can usually survive runtime without inotify */
475 #undef TARGET_NR_inotify_init
476 #undef TARGET_NR_inotify_init1
477 #undef TARGET_NR_inotify_add_watch
478 #undef TARGET_NR_inotify_rm_watch
479 #endif /* CONFIG_INOTIFY  */
480 
481 #if defined(TARGET_NR_prlimit64)
482 #ifndef __NR_prlimit64
483 # define __NR_prlimit64 -1
484 #endif
485 #define __NR_sys_prlimit64 __NR_prlimit64
486 /* The glibc rlimit structure may not match the one used by the underlying syscall */
487 struct host_rlimit64 {
488     uint64_t rlim_cur;
489     uint64_t rlim_max;
490 };
491 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
492           const struct host_rlimit64 *, new_limit,
493           struct host_rlimit64 *, old_limit)
494 #endif
495 
496 
497 #if defined(TARGET_NR_timer_create)
498 /* Maximum of 32 active POSIX timers allowed at any one time. */
499 static timer_t g_posix_timers[32] = { 0, };
500 
501 static inline int next_free_host_timer(void)
502 {
503     int k;
504     /* FIXME: Does finding the next free slot require a lock? */
505     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
506         if (g_posix_timers[k] == 0) {
507             g_posix_timers[k] = (timer_t) 1;
508             return k;
509         }
510     }
511     return -1;
512 }
513 #endif
514 
515 static inline int host_to_target_errno(int host_errno)
516 {
517     switch (host_errno) {
518 #define E(X)  case X: return TARGET_##X;
519 #include "errnos.c.inc"
520 #undef E
521     default:
522         return host_errno;
523     }
524 }
525 
526 static inline int target_to_host_errno(int target_errno)
527 {
528     switch (target_errno) {
529 #define E(X)  case TARGET_##X: return X;
530 #include "errnos.c.inc"
531 #undef E
532     default:
533         return target_errno;
534     }
535 }
536 
537 static inline abi_long get_errno(abi_long ret)
538 {
539     if (ret == -1)
540         return -host_to_target_errno(errno);
541     else
542         return ret;
543 }
544 
545 const char *target_strerror(int err)
546 {
547     if (err == TARGET_ERESTARTSYS) {
548         return "To be restarted";
549     }
550     if (err == TARGET_QEMU_ESIGRETURN) {
551         return "Successful exit from sigreturn";
552     }
553 
554     return strerror(target_to_host_errno(err));
555 }
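
/*
 * Typical usage of the errno convention above (sketch; variable names are
 * illustrative): host results are wrapped in get_errno() so that failures
 * propagate as negative target errnos:
 *
 *     abi_long ret = get_errno(open(path, host_flags));
 *     if (is_error(ret)) {
 *         return ret;            // already -TARGET_Exxx
 *     }
 */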
556 
557 #define safe_syscall0(type, name) \
558 static type safe_##name(void) \
559 { \
560     return safe_syscall(__NR_##name); \
561 }
562 
563 #define safe_syscall1(type, name, type1, arg1) \
564 static type safe_##name(type1 arg1) \
565 { \
566     return safe_syscall(__NR_##name, arg1); \
567 }
568 
569 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
570 static type safe_##name(type1 arg1, type2 arg2) \
571 { \
572     return safe_syscall(__NR_##name, arg1, arg2); \
573 }
574 
575 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
576 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
577 { \
578     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
579 }
580 
581 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
582     type4, arg4) \
583 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
584 { \
585     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
586 }
587 
588 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
589     type4, arg4, type5, arg5) \
590 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
591     type5 arg5) \
592 { \
593     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
594 }
595 
596 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
597     type4, arg4, type5, arg5, type6, arg6) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
599     type5 arg5, type6 arg6) \
600 { \
601     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
602 }
603 
604 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
605 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
606 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
607               int, flags, mode_t, mode)
608 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
609 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
610               struct rusage *, rusage)
611 #endif
612 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
613               int, options, struct rusage *, rusage)
614 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
615 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
616     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
617 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
618               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
619 #endif
620 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
621 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
622               struct timespec *, tsp, const sigset_t *, sigmask,
623               size_t, sigsetsize)
624 #endif
625 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
626               int, maxevents, int, timeout, const sigset_t *, sigmask,
627               size_t, sigsetsize)
628 #if defined(__NR_futex)
629 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
630               const struct timespec *,timeout,int *,uaddr2,int,val3)
631 #endif
632 #if defined(__NR_futex_time64)
633 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
634               const struct timespec *,timeout,int *,uaddr2,int,val3)
635 #endif
636 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
637 safe_syscall2(int, kill, pid_t, pid, int, sig)
638 safe_syscall2(int, tkill, int, tid, int, sig)
639 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
640 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
641 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
642 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
643               unsigned long, pos_l, unsigned long, pos_h)
644 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
645               unsigned long, pos_l, unsigned long, pos_h)
646 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
647               socklen_t, addrlen)
648 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
649               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
650 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
651               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
652 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
653 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
654 safe_syscall2(int, flock, int, fd, int, operation)
655 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
656 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
657               const struct timespec *, uts, size_t, sigsetsize)
658 #endif
659 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
660               int, flags)
661 #if defined(TARGET_NR_nanosleep)
662 safe_syscall2(int, nanosleep, const struct timespec *, req,
663               struct timespec *, rem)
664 #endif
665 #if defined(TARGET_NR_clock_nanosleep) || \
666     defined(TARGET_NR_clock_nanosleep_time64)
667 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
668               const struct timespec *, req, struct timespec *, rem)
669 #endif
670 #ifdef __NR_ipc
671 #ifdef __s390x__
672 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
673               void *, ptr)
674 #else
675 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
676               void *, ptr, long, fifth)
677 #endif
678 #endif
679 #ifdef __NR_msgsnd
680 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
681               int, flags)
682 #endif
683 #ifdef __NR_msgrcv
684 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
685               long, msgtype, int, flags)
686 #endif
687 #ifdef __NR_semtimedop
688 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
689               unsigned, nsops, const struct timespec *, timeout)
690 #endif
691 #if defined(TARGET_NR_mq_timedsend) || \
692     defined(TARGET_NR_mq_timedsend_time64)
693 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
694               size_t, len, unsigned, prio, const struct timespec *, timeout)
695 #endif
696 #if defined(TARGET_NR_mq_timedreceive) || \
697     defined(TARGET_NR_mq_timedreceive_time64)
698 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
699               size_t, len, unsigned *, prio, const struct timespec *, timeout)
700 #endif
701 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
702 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
703               int, outfd, loff_t *, poutoff, size_t, length,
704               unsigned int, flags)
705 #endif
706 
707 /* We do ioctl like this rather than via safe_syscall3 to preserve the
708  * "third argument might be integer or pointer or not present" behaviour of
709  * the libc function.
710  */
711 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
712 /* Similarly for fcntl. Note that callers must always:
713  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
714  *  - use the flock64 struct rather than unsuffixed flock
715  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
716  */
717 #ifdef __NR_fcntl64
718 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
719 #else
720 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
721 #endif
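
/*
 * For example, a guest F_GETLK would be expected to reach the host via the
 * 64-bit variants described above (sketch; names illustrative):
 *
 *     struct flock64 fl64;
 *     ret = get_errno(safe_fcntl(fd, F_GETLK64, &fl64));
 *
 * so that file offsets keep their full width on 32-bit hosts as well.
 */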
722 
723 static inline int host_to_target_sock_type(int host_type)
724 {
725     int target_type;
726 
727     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
728     case SOCK_DGRAM:
729         target_type = TARGET_SOCK_DGRAM;
730         break;
731     case SOCK_STREAM:
732         target_type = TARGET_SOCK_STREAM;
733         break;
734     default:
735         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
736         break;
737     }
738 
739 #if defined(SOCK_CLOEXEC)
740     if (host_type & SOCK_CLOEXEC) {
741         target_type |= TARGET_SOCK_CLOEXEC;
742     }
743 #endif
744 
745 #if defined(SOCK_NONBLOCK)
746     if (host_type & SOCK_NONBLOCK) {
747         target_type |= TARGET_SOCK_NONBLOCK;
748     }
749 #endif
750 
751     return target_type;
752 }
753 
754 static abi_ulong target_brk;
755 static abi_ulong target_original_brk;
756 static abi_ulong brk_page;
757 
758 void target_set_brk(abi_ulong new_brk)
759 {
760     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
761     brk_page = HOST_PAGE_ALIGN(target_brk);
762 }
763 
764 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
765 #define DEBUGF_BRK(message, args...)
766 
767 /* do_brk() must return target values and target errnos. */
768 abi_long do_brk(abi_ulong new_brk)
769 {
770     abi_long mapped_addr;
771     abi_ulong new_alloc_size;
772 
773     /* brk pointers are always untagged */
774 
775     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
776 
777     if (!new_brk) {
778         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
779         return target_brk;
780     }
781     if (new_brk < target_original_brk) {
782         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
783                    target_brk);
784         return target_brk;
785     }
786 
787     /* If the new brk is within the pages already reserved for the
788      * target heap allocation, just set it and we're almost done...  */
789     if (new_brk <= brk_page) {
790         /* Heap contents are initialized to zero, as for anonymous
791          * mapped pages.  */
792         if (new_brk > target_brk) {
793             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
794         }
795         target_brk = new_brk;
796         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
797         return target_brk;
798     }
799 
800     /* We need to allocate more memory after the brk... Note that
801      * we don't use MAP_FIXED because that will map over the top of
802      * any existing mapping (like the one with the host libc or qemu
803      * itself); instead we treat "mapped but at wrong address" as
804      * a failure and unmap again.
805      */
806     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
807     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
808                                         PROT_READ|PROT_WRITE,
809                                         MAP_ANON|MAP_PRIVATE, 0, 0));
810 
811     if (mapped_addr == brk_page) {
812         /* Heap contents are initialized to zero, as for anonymous
813          * mapped pages.  Technically the new pages are already
814          * initialized to zero since they *are* anonymous mapped
815          * pages; however, we have to take care with the contents that
816          * come from the remaining part of the previous page: it may
817          * contain garbage data left over from previous heap usage (the
818          * heap may have grown and then shrunk).  */
819         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
820 
821         target_brk = new_brk;
822         brk_page = HOST_PAGE_ALIGN(target_brk);
823         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
824             target_brk);
825         return target_brk;
826     } else if (mapped_addr != -1) {
827         /* Mapped but at wrong address, meaning there wasn't actually
828          * enough space for this brk.
829          */
830         target_munmap(mapped_addr, new_alloc_size);
831         mapped_addr = -1;
832         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
833     }
834     else {
835         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
836     }
837 
838 #if defined(TARGET_ALPHA)
839     /* We (partially) emulate OSF/1 on Alpha, which requires we
840        return a proper errno, not an unchanged brk value.  */
841     return -TARGET_ENOMEM;
842 #endif
843     /* For everything else, return the previous break. */
844     return target_brk;
845 }
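
/*
 * Guest-visible behaviour of do_brk(), in brief (illustrative):
 *
 *     brk(0)              returns the current break and never fails;
 *     brk(cur + growth)   returns the new break if the pages beyond
 *                         brk_page could be mapped, otherwise the old
 *                         break (or -TARGET_ENOMEM on Alpha).
 */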
846 
847 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
848     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
849 static inline abi_long copy_from_user_fdset(fd_set *fds,
850                                             abi_ulong target_fds_addr,
851                                             int n)
852 {
853     int i, nw, j, k;
854     abi_ulong b, *target_fds;
855 
856     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
857     if (!(target_fds = lock_user(VERIFY_READ,
858                                  target_fds_addr,
859                                  sizeof(abi_ulong) * nw,
860                                  1)))
861         return -TARGET_EFAULT;
862 
863     FD_ZERO(fds);
864     k = 0;
865     for (i = 0; i < nw; i++) {
866         /* grab the abi_ulong */
867         __get_user(b, &target_fds[i]);
868         for (j = 0; j < TARGET_ABI_BITS; j++) {
869             /* check the bit inside the abi_ulong */
870             if ((b >> j) & 1)
871                 FD_SET(k, fds);
872             k++;
873         }
874     }
875 
876     unlock_user(target_fds, target_fds_addr, 0);
877 
878     return 0;
879 }
880 
881 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
882                                                  abi_ulong target_fds_addr,
883                                                  int n)
884 {
885     if (target_fds_addr) {
886         if (copy_from_user_fdset(fds, target_fds_addr, n))
887             return -TARGET_EFAULT;
888         *fds_ptr = fds;
889     } else {
890         *fds_ptr = NULL;
891     }
892     return 0;
893 }
894 
895 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
896                                           const fd_set *fds,
897                                           int n)
898 {
899     int i, nw, j, k;
900     abi_long v;
901     abi_ulong *target_fds;
902 
903     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
904     if (!(target_fds = lock_user(VERIFY_WRITE,
905                                  target_fds_addr,
906                                  sizeof(abi_ulong) * nw,
907                                  0)))
908         return -TARGET_EFAULT;
909 
910     k = 0;
911     for (i = 0; i < nw; i++) {
912         v = 0;
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
915             k++;
916         }
917         __put_user(v, &target_fds[i]);
918     }
919 
920     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
921 
922     return 0;
923 }
924 #endif
925 
926 #if defined(__alpha__)
927 #define HOST_HZ 1024
928 #else
929 #define HOST_HZ 100
930 #endif
931 
932 static inline abi_long host_to_target_clock_t(long ticks)
933 {
934 #if HOST_HZ == TARGET_HZ
935     return ticks;
936 #else
937     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
938 #endif
939 }
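
/*
 * Worked example with illustrative values: for HOST_HZ == 100 and
 * TARGET_HZ == 250, a host value of 100 ticks (one second) converts to
 * (100 * 250) / 100 == 250 target ticks, i.e. still one second of
 * guest clock_t.
 */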
940 
941 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
942                                              const struct rusage *rusage)
943 {
944     struct target_rusage *target_rusage;
945 
946     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
947         return -TARGET_EFAULT;
948     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
949     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
950     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
951     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
952     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
953     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
954     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
955     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
956     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
957     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
958     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
959     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
960     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
961     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
962     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
963     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
964     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
965     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
966     unlock_user_struct(target_rusage, target_addr, 1);
967 
968     return 0;
969 }
970 
971 #ifdef TARGET_NR_setrlimit
972 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
973 {
974     abi_ulong target_rlim_swap;
975     rlim_t result;
976 
977     target_rlim_swap = tswapal(target_rlim);
978     if (target_rlim_swap == TARGET_RLIM_INFINITY)
979         return RLIM_INFINITY;
980 
981     result = target_rlim_swap;
982     if (target_rlim_swap != (rlim_t)result)
983         return RLIM_INFINITY;
984 
985     return result;
986 }
987 #endif
988 
989 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
990 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
991 {
992     abi_ulong target_rlim_swap;
993     abi_ulong result;
994 
995     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
996         target_rlim_swap = TARGET_RLIM_INFINITY;
997     else
998         target_rlim_swap = rlim;
999     result = tswapal(target_rlim_swap);
1000 
1001     return result;
1002 }
1003 #endif
1004 
1005 static inline int target_to_host_resource(int code)
1006 {
1007     switch (code) {
1008     case TARGET_RLIMIT_AS:
1009         return RLIMIT_AS;
1010     case TARGET_RLIMIT_CORE:
1011         return RLIMIT_CORE;
1012     case TARGET_RLIMIT_CPU:
1013         return RLIMIT_CPU;
1014     case TARGET_RLIMIT_DATA:
1015         return RLIMIT_DATA;
1016     case TARGET_RLIMIT_FSIZE:
1017         return RLIMIT_FSIZE;
1018     case TARGET_RLIMIT_LOCKS:
1019         return RLIMIT_LOCKS;
1020     case TARGET_RLIMIT_MEMLOCK:
1021         return RLIMIT_MEMLOCK;
1022     case TARGET_RLIMIT_MSGQUEUE:
1023         return RLIMIT_MSGQUEUE;
1024     case TARGET_RLIMIT_NICE:
1025         return RLIMIT_NICE;
1026     case TARGET_RLIMIT_NOFILE:
1027         return RLIMIT_NOFILE;
1028     case TARGET_RLIMIT_NPROC:
1029         return RLIMIT_NPROC;
1030     case TARGET_RLIMIT_RSS:
1031         return RLIMIT_RSS;
1032     case TARGET_RLIMIT_RTPRIO:
1033         return RLIMIT_RTPRIO;
1034     case TARGET_RLIMIT_SIGPENDING:
1035         return RLIMIT_SIGPENDING;
1036     case TARGET_RLIMIT_STACK:
1037         return RLIMIT_STACK;
1038     default:
1039         return code;
1040     }
1041 }
1042 
1043 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1044                                               abi_ulong target_tv_addr)
1045 {
1046     struct target_timeval *target_tv;
1047 
1048     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1049         return -TARGET_EFAULT;
1050     }
1051 
1052     __get_user(tv->tv_sec, &target_tv->tv_sec);
1053     __get_user(tv->tv_usec, &target_tv->tv_usec);
1054 
1055     unlock_user_struct(target_tv, target_tv_addr, 0);
1056 
1057     return 0;
1058 }
1059 
1060 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1061                                             const struct timeval *tv)
1062 {
1063     struct target_timeval *target_tv;
1064 
1065     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1066         return -TARGET_EFAULT;
1067     }
1068 
1069     __put_user(tv->tv_sec, &target_tv->tv_sec);
1070     __put_user(tv->tv_usec, &target_tv->tv_usec);
1071 
1072     unlock_user_struct(target_tv, target_tv_addr, 1);
1073 
1074     return 0;
1075 }
1076 
1077 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1078 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1079                                                 abi_ulong target_tv_addr)
1080 {
1081     struct target__kernel_sock_timeval *target_tv;
1082 
1083     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1084         return -TARGET_EFAULT;
1085     }
1086 
1087     __get_user(tv->tv_sec, &target_tv->tv_sec);
1088     __get_user(tv->tv_usec, &target_tv->tv_usec);
1089 
1090     unlock_user_struct(target_tv, target_tv_addr, 0);
1091 
1092     return 0;
1093 }
1094 #endif
1095 
1096 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1097                                               const struct timeval *tv)
1098 {
1099     struct target__kernel_sock_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __put_user(tv->tv_sec, &target_tv->tv_sec);
1106     __put_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 1);
1109 
1110     return 0;
1111 }
1112 
1113 #if defined(TARGET_NR_futex) || \
1114     defined(TARGET_NR_rt_sigtimedwait) || \
1115     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1116     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1117     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1118     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1119     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1120     defined(TARGET_NR_timer_settime) || \
1121     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1122 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1123                                                abi_ulong target_addr)
1124 {
1125     struct target_timespec *target_ts;
1126 
1127     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1128         return -TARGET_EFAULT;
1129     }
1130     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1131     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1132     unlock_user_struct(target_ts, target_addr, 0);
1133     return 0;
1134 }
1135 #endif
1136 
1137 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1138     defined(TARGET_NR_timer_settime64) || \
1139     defined(TARGET_NR_mq_timedsend_time64) || \
1140     defined(TARGET_NR_mq_timedreceive_time64) || \
1141     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1142     defined(TARGET_NR_clock_nanosleep_time64) || \
1143     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1144     defined(TARGET_NR_utimensat) || \
1145     defined(TARGET_NR_utimensat_time64) || \
1146     defined(TARGET_NR_semtimedop_time64) || \
1147     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1148 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1149                                                  abi_ulong target_addr)
1150 {
1151     struct target__kernel_timespec *target_ts;
1152 
1153     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1154         return -TARGET_EFAULT;
1155     }
1156     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1157     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1158     /* in 32-bit mode, this drops the padding */
1159     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1160     unlock_user_struct(target_ts, target_addr, 0);
1161     return 0;
1162 }
1163 #endif
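
/*
 * The cast through abi_long above is what "drops the padding": on a
 * 32-bit guest ABI the 64-bit tv_nsec field of target__kernel_timespec
 * only has 32 significant bits, so e.g. (illustrative) a stored value of
 * 999999999 with junk in the upper half still comes out as 999999999.
 */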
1164 
1165 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1166                                                struct timespec *host_ts)
1167 {
1168     struct target_timespec *target_ts;
1169 
1170     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1171         return -TARGET_EFAULT;
1172     }
1173     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1174     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1175     unlock_user_struct(target_ts, target_addr, 1);
1176     return 0;
1177 }
1178 
1179 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1180                                                  struct timespec *host_ts)
1181 {
1182     struct target__kernel_timespec *target_ts;
1183 
1184     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1185         return -TARGET_EFAULT;
1186     }
1187     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1188     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1189     unlock_user_struct(target_ts, target_addr, 1);
1190     return 0;
1191 }
1192 
1193 #if defined(TARGET_NR_gettimeofday)
1194 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1195                                              struct timezone *tz)
1196 {
1197     struct target_timezone *target_tz;
1198 
1199     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1200         return -TARGET_EFAULT;
1201     }
1202 
1203     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1204     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1205 
1206     unlock_user_struct(target_tz, target_tz_addr, 1);
1207 
1208     return 0;
1209 }
1210 #endif
1211 
1212 #if defined(TARGET_NR_settimeofday)
1213 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1214                                                abi_ulong target_tz_addr)
1215 {
1216     struct target_timezone *target_tz;
1217 
1218     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1219         return -TARGET_EFAULT;
1220     }
1221 
1222     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1223     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1224 
1225     unlock_user_struct(target_tz, target_tz_addr, 0);
1226 
1227     return 0;
1228 }
1229 #endif
1230 
1231 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1232 #include <mqueue.h>
1233 
1234 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1235                                               abi_ulong target_mq_attr_addr)
1236 {
1237     struct target_mq_attr *target_mq_attr;
1238 
1239     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1240                           target_mq_attr_addr, 1))
1241         return -TARGET_EFAULT;
1242 
1243     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1244     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1245     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1246     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1247 
1248     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1249 
1250     return 0;
1251 }
1252 
1253 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1254                                             const struct mq_attr *attr)
1255 {
1256     struct target_mq_attr *target_mq_attr;
1257 
1258     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1259                           target_mq_attr_addr, 0))
1260         return -TARGET_EFAULT;
1261 
1262     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1263     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1264     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1265     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1266 
1267     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1268 
1269     return 0;
1270 }
1271 #endif
1272 
1273 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1274 /* do_select() must return target values and target errnos. */
1275 static abi_long do_select(int n,
1276                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1277                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1278 {
1279     fd_set rfds, wfds, efds;
1280     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1281     struct timeval tv;
1282     struct timespec ts, *ts_ptr;
1283     abi_long ret;
1284 
1285     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1286     if (ret) {
1287         return ret;
1288     }
1289     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1290     if (ret) {
1291         return ret;
1292     }
1293     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1294     if (ret) {
1295         return ret;
1296     }
1297 
1298     if (target_tv_addr) {
1299         if (copy_from_user_timeval(&tv, target_tv_addr))
1300             return -TARGET_EFAULT;
1301         ts.tv_sec = tv.tv_sec;
1302         ts.tv_nsec = tv.tv_usec * 1000;
1303         ts_ptr = &ts;
1304     } else {
1305         ts_ptr = NULL;
1306     }
1307 
1308     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1309                                   ts_ptr, NULL));
1310 
1311     if (!is_error(ret)) {
1312         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1313             return -TARGET_EFAULT;
1314         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1315             return -TARGET_EFAULT;
1316         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1317             return -TARGET_EFAULT;
1318 
1319         if (target_tv_addr) {
1320             tv.tv_sec = ts.tv_sec;
1321             tv.tv_usec = ts.tv_nsec / 1000;
1322             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1323                 return -TARGET_EFAULT;
1324             }
1325         }
1326     }
1327 
1328     return ret;
1329 }
1330 
1331 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1332 static abi_long do_old_select(abi_ulong arg1)
1333 {
1334     struct target_sel_arg_struct *sel;
1335     abi_ulong inp, outp, exp, tvp;
1336     long nsel;
1337 
1338     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1339         return -TARGET_EFAULT;
1340     }
1341 
1342     nsel = tswapal(sel->n);
1343     inp = tswapal(sel->inp);
1344     outp = tswapal(sel->outp);
1345     exp = tswapal(sel->exp);
1346     tvp = tswapal(sel->tvp);
1347 
1348     unlock_user_struct(sel, arg1, 0);
1349 
1350     return do_select(nsel, inp, outp, exp, tvp);
1351 }
1352 #endif
1353 #endif
1354 
1355 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1356 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1357                             abi_long arg4, abi_long arg5, abi_long arg6,
1358                             bool time64)
1359 {
1360     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1361     fd_set rfds, wfds, efds;
1362     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1363     struct timespec ts, *ts_ptr;
1364     abi_long ret;
1365 
1366     /*
1367      * The 6th arg is actually two args smashed together,
1368      * so we cannot use the C library.
1369      */
1370     sigset_t set;
1371     struct {
1372         sigset_t *set;
1373         size_t size;
1374     } sig, *sig_ptr;
1375 
1376     abi_ulong arg_sigset, arg_sigsize, *arg7;
1377     target_sigset_t *target_sigset;
1378 
1379     n = arg1;
1380     rfd_addr = arg2;
1381     wfd_addr = arg3;
1382     efd_addr = arg4;
1383     ts_addr = arg5;
1384 
1385     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1386     if (ret) {
1387         return ret;
1388     }
1389     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1390     if (ret) {
1391         return ret;
1392     }
1393     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1394     if (ret) {
1395         return ret;
1396     }
1397 
1398     /*
1399      * This takes a timespec, and not a timeval, so we cannot
1400      * use the do_select() helper ...
1401      */
1402     if (ts_addr) {
1403         if (time64) {
1404             if (target_to_host_timespec64(&ts, ts_addr)) {
1405                 return -TARGET_EFAULT;
1406             }
1407         } else {
1408             if (target_to_host_timespec(&ts, ts_addr)) {
1409                 return -TARGET_EFAULT;
1410             }
1411         }
1412         ts_ptr = &ts;
1413     } else {
1414         ts_ptr = NULL;
1415     }
1416 
1417     /* Extract the two packed args for the sigset */
1418     if (arg6) {
1419         sig_ptr = &sig;
1420         sig.size = SIGSET_T_SIZE;
1421 
1422         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1423         if (!arg7) {
1424             return -TARGET_EFAULT;
1425         }
1426         arg_sigset = tswapal(arg7[0]);
1427         arg_sigsize = tswapal(arg7[1]);
1428         unlock_user(arg7, arg6, 0);
1429 
1430         if (arg_sigset) {
1431             sig.set = &set;
1432             if (arg_sigsize != sizeof(*target_sigset)) {
1433                 /* Like the kernel, we enforce correct size sigsets */
1434                 return -TARGET_EINVAL;
1435             }
1436             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1437                                       sizeof(*target_sigset), 1);
1438             if (!target_sigset) {
1439                 return -TARGET_EFAULT;
1440             }
1441             target_to_host_sigset(&set, target_sigset);
1442             unlock_user(target_sigset, arg_sigset, 0);
1443         } else {
1444             sig.set = NULL;
1445         }
1446     } else {
1447         sig_ptr = NULL;
1448     }
1449 
1450     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1451                                   ts_ptr, sig_ptr));
1452 
1453     if (!is_error(ret)) {
1454         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1455             return -TARGET_EFAULT;
1456         }
1457         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1458             return -TARGET_EFAULT;
1459         }
1460         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1461             return -TARGET_EFAULT;
1462         }
1463         if (time64) {
1464             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1465                 return -TARGET_EFAULT;
1466             }
1467         } else {
1468             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1469                 return -TARGET_EFAULT;
1470             }
1471         }
1472     }
1473     return ret;
1474 }
1475 #endif
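
/*
 * For reference, the packed 6th argument unpacked above is what a guest
 * builds when invoking pselect6 directly, roughly (sketch; names are
 * illustrative):
 *
 *     struct { sigset_t *set; size_t size; } sig6 = { &mask, _NSIG / 8 };
 *     syscall(__NR_pselect6, nfds, &rfds, &wfds, &efds, &ts, &sig6);
 */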
1476 
1477 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1478     defined(TARGET_NR_ppoll_time64)
1479 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1480                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1481 {
1482     struct target_pollfd *target_pfd;
1483     unsigned int nfds = arg2;
1484     struct pollfd *pfd;
1485     unsigned int i;
1486     abi_long ret;
1487 
1488     pfd = NULL;
1489     target_pfd = NULL;
1490     if (nfds) {
1491         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1492             return -TARGET_EINVAL;
1493         }
1494         target_pfd = lock_user(VERIFY_WRITE, arg1,
1495                                sizeof(struct target_pollfd) * nfds, 1);
1496         if (!target_pfd) {
1497             return -TARGET_EFAULT;
1498         }
1499 
1500         pfd = alloca(sizeof(struct pollfd) * nfds);
1501         for (i = 0; i < nfds; i++) {
1502             pfd[i].fd = tswap32(target_pfd[i].fd);
1503             pfd[i].events = tswap16(target_pfd[i].events);
1504         }
1505     }
1506     if (ppoll) {
1507         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1508         target_sigset_t *target_set;
1509         sigset_t _set, *set = &_set;
1510 
1511         if (arg3) {
1512             if (time64) {
1513                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1514                     unlock_user(target_pfd, arg1, 0);
1515                     return -TARGET_EFAULT;
1516                 }
1517             } else {
1518                 if (target_to_host_timespec(timeout_ts, arg3)) {
1519                     unlock_user(target_pfd, arg1, 0);
1520                     return -TARGET_EFAULT;
1521                 }
1522             }
1523         } else {
1524             timeout_ts = NULL;
1525         }
1526 
1527         if (arg4) {
1528             if (arg5 != sizeof(target_sigset_t)) {
1529                 unlock_user(target_pfd, arg1, 0);
1530                 return -TARGET_EINVAL;
1531             }
1532 
1533             target_set = lock_user(VERIFY_READ, arg4,
1534                                    sizeof(target_sigset_t), 1);
1535             if (!target_set) {
1536                 unlock_user(target_pfd, arg1, 0);
1537                 return -TARGET_EFAULT;
1538             }
1539             target_to_host_sigset(set, target_set);
1540         } else {
1541             set = NULL;
1542         }
1543 
1544         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1545                                    set, SIGSET_T_SIZE));
1546 
1547         if (!is_error(ret) && arg3) {
1548             if (time64) {
1549                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1550                     return -TARGET_EFAULT;
1551                 }
1552             } else {
1553                 if (host_to_target_timespec(arg3, timeout_ts)) {
1554                     return -TARGET_EFAULT;
1555                 }
1556             }
1557         }
1558         if (arg4) {
1559             unlock_user(target_set, arg4, 0);
1560         }
1561     } else {
1562         struct timespec ts, *pts;
1563 
1564         if (arg3 >= 0) {
1565             /* Convert ms to secs, ns */
1566             ts.tv_sec = arg3 / 1000;
1567             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1568             pts = &ts;
1569         } else {
1570             /* A negative poll() timeout means "infinite" */
1571             pts = NULL;
1572         }
1573         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1574     }
1575 
1576     if (!is_error(ret)) {
1577         for (i = 0; i < nfds; i++) {
1578             target_pfd[i].revents = tswap16(pfd[i].revents);
1579         }
1580     }
1581     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1582     return ret;
1583 }
1584 #endif
1585 
1586 static abi_long do_pipe2(int host_pipe[], int flags)
1587 {
1588 #ifdef CONFIG_PIPE2
1589     return pipe2(host_pipe, flags);
1590 #else
1591     return -ENOSYS;
1592 #endif
1593 }
1594 
1595 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1596                         int flags, int is_pipe2)
1597 {
1598     int host_pipe[2];
1599     abi_long ret;
1600     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1601 
1602     if (is_error(ret))
1603         return get_errno(ret);
1604 
1605     /* Several targets have special calling conventions for the original
1606        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
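    /* For example, Alpha returns the read end as the syscall result and
       passes the write end back in register a4, as the code below shows. */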
1607     if (!is_pipe2) {
1608 #if defined(TARGET_ALPHA)
1609         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1610         return host_pipe[0];
1611 #elif defined(TARGET_MIPS)
1612         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1613         return host_pipe[0];
1614 #elif defined(TARGET_SH4)
1615         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1616         return host_pipe[0];
1617 #elif defined(TARGET_SPARC)
1618         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1619         return host_pipe[0];
1620 #endif
1621     }
1622 
1623     if (put_user_s32(host_pipe[0], pipedes)
1624         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1625         return -TARGET_EFAULT;
1626     return get_errno(ret);
1627 }
1628 
1629 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1630                                               abi_ulong target_addr,
1631                                               socklen_t len)
1632 {
1633     struct target_ip_mreqn *target_smreqn;
1634 
1635     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1636     if (!target_smreqn)
1637         return -TARGET_EFAULT;
1638     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1639     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1640     if (len == sizeof(struct target_ip_mreqn))
1641         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1642     unlock_user(target_smreqn, target_addr, 0);
1643 
1644     return 0;
1645 }
1646 
1647 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1648                                                abi_ulong target_addr,
1649                                                socklen_t len)
1650 {
1651     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1652     sa_family_t sa_family;
1653     struct target_sockaddr *target_saddr;
1654 
1655     if (fd_trans_target_to_host_addr(fd)) {
1656         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1657     }
1658 
1659     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_saddr)
1661         return -TARGET_EFAULT;
1662 
1663     sa_family = tswap16(target_saddr->sa_family);
1664 
1665     /* Oops. The caller might send an incomplete sun_path; sun_path
1666      * must be terminated by \0 (see the manual page), but
1667      * unfortunately it is quite common to specify sockaddr_un
1668      * length as "strlen(x->sun_path)" while it should be
1669      * "strlen(...) + 1". We'll fix that here if needed.
1670      * The Linux kernel has a similar fixup.
1671      */
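    /*
     * For example, a guest that passes
     * offsetof(struct sockaddr_un, sun_path) + strlen(sun_path) as the
     * length gets it bumped by one below, provided the byte just past
     * that length really is the terminating NUL.
     */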
1672 
1673     if (sa_family == AF_UNIX) {
1674         if (len < unix_maxlen && len > 0) {
1675             char *cp = (char*)target_saddr;
1676 
1677             if (cp[len - 1] && !cp[len])
1678                 len++;
1679         }
1680         if (len > unix_maxlen)
1681             len = unix_maxlen;
1682     }
1683 
1684     memcpy(addr, target_saddr, len);
1685     addr->sa_family = sa_family;
1686     if (sa_family == AF_NETLINK) {
1687         struct sockaddr_nl *nladdr;
1688 
1689         nladdr = (struct sockaddr_nl *)addr;
1690         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1691         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1692     } else if (sa_family == AF_PACKET) {
1693         struct target_sockaddr_ll *lladdr;
1694 
1695         lladdr = (struct target_sockaddr_ll *)addr;
1696         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1697         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1698     }
1699     unlock_user(target_saddr, target_addr, 0);
1700 
1701     return 0;
1702 }
1703 
1704 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1705                                                struct sockaddr *addr,
1706                                                socklen_t len)
1707 {
1708     struct target_sockaddr *target_saddr;
1709 
1710     if (len == 0) {
1711         return 0;
1712     }
1713     assert(addr);
1714 
1715     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1716     if (!target_saddr)
1717         return -TARGET_EFAULT;
1718     memcpy(target_saddr, addr, len);
1719     if (len >= offsetof(struct target_sockaddr, sa_family) +
1720         sizeof(target_saddr->sa_family)) {
1721         target_saddr->sa_family = tswap16(addr->sa_family);
1722     }
1723     if (addr->sa_family == AF_NETLINK &&
1724         len >= sizeof(struct target_sockaddr_nl)) {
1725         struct target_sockaddr_nl *target_nl =
1726                (struct target_sockaddr_nl *)target_saddr;
1727         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1728         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1729     } else if (addr->sa_family == AF_PACKET) {
1730         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1731         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1732         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1733     } else if (addr->sa_family == AF_INET6 &&
1734                len >= sizeof(struct target_sockaddr_in6)) {
1735         struct target_sockaddr_in6 *target_in6 =
1736                (struct target_sockaddr_in6 *)target_saddr;
1737         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1738     }
1739     unlock_user(target_saddr, target_addr, len);
1740 
1741     return 0;
1742 }
1743 
1744 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1745                                            struct target_msghdr *target_msgh)
1746 {
1747     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1748     abi_long msg_controllen;
1749     abi_ulong target_cmsg_addr;
1750     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1751     socklen_t space = 0;
1752 
1753     msg_controllen = tswapal(target_msgh->msg_controllen);
1754     if (msg_controllen < sizeof (struct target_cmsghdr))
1755         goto the_end;
1756     target_cmsg_addr = tswapal(target_msgh->msg_control);
1757     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1758     target_cmsg_start = target_cmsg;
1759     if (!target_cmsg)
1760         return -TARGET_EFAULT;
1761 
1762     while (cmsg && target_cmsg) {
1763         void *data = CMSG_DATA(cmsg);
1764         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1765 
1766         int len = tswapal(target_cmsg->cmsg_len)
1767             - sizeof(struct target_cmsghdr);
1768 
1769         space += CMSG_SPACE(len);
1770         if (space > msgh->msg_controllen) {
1771             space -= CMSG_SPACE(len);
1772             /* This is a QEMU bug, since we allocated the payload
1773              * area ourselves (unlike overflow in host-to-target
1774              * conversion, which is just the guest giving us a buffer
1775              * that's too small). It can't happen for the payload types
1776              * we currently support; if it becomes an issue in future
1777              * we would need to improve our allocation strategy to
1778              * something more intelligent than "twice the size of the
1779              * target buffer we're reading from".
1780              */
1781             qemu_log_mask(LOG_UNIMP,
1782                           ("Unsupported ancillary data %d/%d: "
1783                            "unhandled msg size\n"),
1784                           tswap32(target_cmsg->cmsg_level),
1785                           tswap32(target_cmsg->cmsg_type));
1786             break;
1787         }
1788 
1789         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1790             cmsg->cmsg_level = SOL_SOCKET;
1791         } else {
1792             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1793         }
1794         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1795         cmsg->cmsg_len = CMSG_LEN(len);
1796 
1797         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1798             int *fd = (int *)data;
1799             int *target_fd = (int *)target_data;
1800             int i, numfds = len / sizeof(int);
1801 
1802             for (i = 0; i < numfds; i++) {
1803                 __get_user(fd[i], target_fd + i);
1804             }
1805         } else if (cmsg->cmsg_level == SOL_SOCKET
1806                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1807             struct ucred *cred = (struct ucred *)data;
1808             struct target_ucred *target_cred =
1809                 (struct target_ucred *)target_data;
1810 
1811             __get_user(cred->pid, &target_cred->pid);
1812             __get_user(cred->uid, &target_cred->uid);
1813             __get_user(cred->gid, &target_cred->gid);
1814         } else {
1815             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1816                           cmsg->cmsg_level, cmsg->cmsg_type);
1817             memcpy(data, target_data, len);
1818         }
1819 
1820         cmsg = CMSG_NXTHDR(msgh, cmsg);
1821         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1822                                          target_cmsg_start);
1823     }
1824     unlock_user(target_cmsg, target_cmsg_addr, 0);
1825  the_end:
1826     msgh->msg_controllen = space;
1827     return 0;
1828 }
1829 
1830 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1831                                            struct msghdr *msgh)
1832 {
1833     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1834     abi_long msg_controllen;
1835     abi_ulong target_cmsg_addr;
1836     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1837     socklen_t space = 0;
1838 
1839     msg_controllen = tswapal(target_msgh->msg_controllen);
1840     if (msg_controllen < sizeof (struct target_cmsghdr))
1841         goto the_end;
1842     target_cmsg_addr = tswapal(target_msgh->msg_control);
1843     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1844     target_cmsg_start = target_cmsg;
1845     if (!target_cmsg)
1846         return -TARGET_EFAULT;
1847 
1848     while (cmsg && target_cmsg) {
1849         void *data = CMSG_DATA(cmsg);
1850         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1851 
1852         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1853         int tgt_len, tgt_space;
1854 
1855         /* We never copy a half-header but may copy half-data;
1856          * this is Linux's behaviour in put_cmsg(). Note that
1857          * truncation here is a guest problem (which we report
1858          * to the guest via the CTRUNC bit), unlike truncation
1859          * in target_to_host_cmsg, which is a QEMU bug.
1860          */
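        /* For instance, a guest control buffer with room for the header but
         * only part of the payload has MSG_CTRUNC set and receives only the
         * bytes that fit.
         */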
1861         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1862             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1863             break;
1864         }
1865 
1866         if (cmsg->cmsg_level == SOL_SOCKET) {
1867             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1868         } else {
1869             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1870         }
1871         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1872 
1873         /* Payload types which need a different size of payload on
1874          * the target must adjust tgt_len here.
1875          */
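        /* e.g. SO_TIMESTAMP below is resized from the host's struct timeval
         * to the target's struct target_timeval.
         */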
1876         tgt_len = len;
1877         switch (cmsg->cmsg_level) {
1878         case SOL_SOCKET:
1879             switch (cmsg->cmsg_type) {
1880             case SO_TIMESTAMP:
1881                 tgt_len = sizeof(struct target_timeval);
1882                 break;
1883             default:
1884                 break;
1885             }
1886             break;
1887         default:
1888             break;
1889         }
1890 
1891         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1892             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1893             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1894         }
1895 
1896         /* We must now copy-and-convert len bytes of payload
1897          * into tgt_len bytes of destination space. Bear in mind
1898          * that in both source and destination we may be dealing
1899          * with a truncated value!
1900          */
1901         switch (cmsg->cmsg_level) {
1902         case SOL_SOCKET:
1903             switch (cmsg->cmsg_type) {
1904             case SCM_RIGHTS:
1905             {
1906                 int *fd = (int *)data;
1907                 int *target_fd = (int *)target_data;
1908                 int i, numfds = tgt_len / sizeof(int);
1909 
1910                 for (i = 0; i < numfds; i++) {
1911                     __put_user(fd[i], target_fd + i);
1912                 }
1913                 break;
1914             }
1915             case SO_TIMESTAMP:
1916             {
1917                 struct timeval *tv = (struct timeval *)data;
1918                 struct target_timeval *target_tv =
1919                     (struct target_timeval *)target_data;
1920 
1921                 if (len != sizeof(struct timeval) ||
1922                     tgt_len != sizeof(struct target_timeval)) {
1923                     goto unimplemented;
1924                 }
1925 
1926                 /* copy struct timeval to target */
1927                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1928                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1929                 break;
1930             }
1931             case SCM_CREDENTIALS:
1932             {
1933                 struct ucred *cred = (struct ucred *)data;
1934                 struct target_ucred *target_cred =
1935                     (struct target_ucred *)target_data;
1936 
1937                 __put_user(cred->pid, &target_cred->pid);
1938                 __put_user(cred->uid, &target_cred->uid);
1939                 __put_user(cred->gid, &target_cred->gid);
1940                 break;
1941             }
1942             default:
1943                 goto unimplemented;
1944             }
1945             break;
1946 
1947         case SOL_IP:
1948             switch (cmsg->cmsg_type) {
1949             case IP_TTL:
1950             {
1951                 uint32_t *v = (uint32_t *)data;
1952                 uint32_t *t_int = (uint32_t *)target_data;
1953 
1954                 if (len != sizeof(uint32_t) ||
1955                     tgt_len != sizeof(uint32_t)) {
1956                     goto unimplemented;
1957                 }
1958                 __put_user(*v, t_int);
1959                 break;
1960             }
1961             case IP_RECVERR:
1962             {
1963                 struct errhdr_t {
1964                    struct sock_extended_err ee;
1965                    struct sockaddr_in offender;
1966                 };
1967                 struct errhdr_t *errh = (struct errhdr_t *)data;
1968                 struct errhdr_t *target_errh =
1969                     (struct errhdr_t *)target_data;
1970 
1971                 if (len != sizeof(struct errhdr_t) ||
1972                     tgt_len != sizeof(struct errhdr_t)) {
1973                     goto unimplemented;
1974                 }
1975                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1976                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1977                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1978                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1979                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1980                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1981                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1982                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1983                     (void *) &errh->offender, sizeof(errh->offender));
1984                 break;
1985             }
1986             default:
1987                 goto unimplemented;
1988             }
1989             break;
1990 
1991         case SOL_IPV6:
1992             switch (cmsg->cmsg_type) {
1993             case IPV6_HOPLIMIT:
1994             {
1995                 uint32_t *v = (uint32_t *)data;
1996                 uint32_t *t_int = (uint32_t *)target_data;
1997 
1998                 if (len != sizeof(uint32_t) ||
1999                     tgt_len != sizeof(uint32_t)) {
2000                     goto unimplemented;
2001                 }
2002                 __put_user(*v, t_int);
2003                 break;
2004             }
2005             case IPV6_RECVERR:
2006             {
2007                 struct errhdr6_t {
2008                    struct sock_extended_err ee;
2009                    struct sockaddr_in6 offender;
2010                 };
2011                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2012                 struct errhdr6_t *target_errh =
2013                     (struct errhdr6_t *)target_data;
2014 
2015                 if (len != sizeof(struct errhdr6_t) ||
2016                     tgt_len != sizeof(struct errhdr6_t)) {
2017                     goto unimplemented;
2018                 }
2019                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2020                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2021                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2022                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2023                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2024                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2025                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2026                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2027                     (void *) &errh->offender, sizeof(errh->offender));
2028                 break;
2029             }
2030             default:
2031                 goto unimplemented;
2032             }
2033             break;
2034 
2035         default:
2036         unimplemented:
2037             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2038                           cmsg->cmsg_level, cmsg->cmsg_type);
2039             memcpy(target_data, data, MIN(len, tgt_len));
2040             if (tgt_len > len) {
2041                 memset(target_data + len, 0, tgt_len - len);
2042             }
2043         }
2044 
2045         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2046         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2047         if (msg_controllen < tgt_space) {
2048             tgt_space = msg_controllen;
2049         }
2050         msg_controllen -= tgt_space;
2051         space += tgt_space;
2052         cmsg = CMSG_NXTHDR(msgh, cmsg);
2053         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2054                                          target_cmsg_start);
2055     }
2056     unlock_user(target_cmsg, target_cmsg_addr, space);
2057  the_end:
2058     target_msgh->msg_controllen = tswapal(space);
2059     return 0;
2060 }
2061 
2062 /* do_setsockopt() Must return target values and target errnos. */
2063 static abi_long do_setsockopt(int sockfd, int level, int optname,
2064                               abi_ulong optval_addr, socklen_t optlen)
2065 {
2066     abi_long ret;
2067     int val;
2068     struct ip_mreqn *ip_mreq;
2069     struct ip_mreq_source *ip_mreq_source;
2070 
2071     switch(level) {
2072     case SOL_TCP:
2073     case SOL_UDP:
2074         /* TCP and UDP options all take an 'int' value.  */
2075         if (optlen < sizeof(uint32_t))
2076             return -TARGET_EINVAL;
2077 
2078         if (get_user_u32(val, optval_addr))
2079             return -TARGET_EFAULT;
2080         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2081         break;
2082     case SOL_IP:
2083         switch(optname) {
2084         case IP_TOS:
2085         case IP_TTL:
2086         case IP_HDRINCL:
2087         case IP_ROUTER_ALERT:
2088         case IP_RECVOPTS:
2089         case IP_RETOPTS:
2090         case IP_PKTINFO:
2091         case IP_MTU_DISCOVER:
2092         case IP_RECVERR:
2093         case IP_RECVTTL:
2094         case IP_RECVTOS:
2095 #ifdef IP_FREEBIND
2096         case IP_FREEBIND:
2097 #endif
2098         case IP_MULTICAST_TTL:
2099         case IP_MULTICAST_LOOP:
2100             val = 0;
2101             if (optlen >= sizeof(uint32_t)) {
2102                 if (get_user_u32(val, optval_addr))
2103                     return -TARGET_EFAULT;
2104             } else if (optlen >= 1) {
2105                 if (get_user_u8(val, optval_addr))
2106                     return -TARGET_EFAULT;
2107             }
2108             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2109             break;
2110         case IP_ADD_MEMBERSHIP:
2111         case IP_DROP_MEMBERSHIP:
2112             if (optlen < sizeof (struct target_ip_mreq) ||
2113                 optlen > sizeof (struct target_ip_mreqn))
2114                 return -TARGET_EINVAL;
2115 
2116             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2117             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2118             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2119             break;
2120 
2121         case IP_BLOCK_SOURCE:
2122         case IP_UNBLOCK_SOURCE:
2123         case IP_ADD_SOURCE_MEMBERSHIP:
2124         case IP_DROP_SOURCE_MEMBERSHIP:
2125             if (optlen != sizeof (struct target_ip_mreq_source))
2126                 return -TARGET_EINVAL;
2127 
2128             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
2129             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2130             unlock_user(ip_mreq_source, optval_addr, 0);
2131             break;
2132 
2133         default:
2134             goto unimplemented;
2135         }
2136         break;
2137     case SOL_IPV6:
2138         switch (optname) {
2139         case IPV6_MTU_DISCOVER:
2140         case IPV6_MTU:
2141         case IPV6_V6ONLY:
2142         case IPV6_RECVPKTINFO:
2143         case IPV6_UNICAST_HOPS:
2144         case IPV6_MULTICAST_HOPS:
2145         case IPV6_MULTICAST_LOOP:
2146         case IPV6_RECVERR:
2147         case IPV6_RECVHOPLIMIT:
2148         case IPV6_2292HOPLIMIT:
2149         case IPV6_CHECKSUM:
2150         case IPV6_ADDRFORM:
2151         case IPV6_2292PKTINFO:
2152         case IPV6_RECVTCLASS:
2153         case IPV6_RECVRTHDR:
2154         case IPV6_2292RTHDR:
2155         case IPV6_RECVHOPOPTS:
2156         case IPV6_2292HOPOPTS:
2157         case IPV6_RECVDSTOPTS:
2158         case IPV6_2292DSTOPTS:
2159         case IPV6_TCLASS:
2160         case IPV6_ADDR_PREFERENCES:
2161 #ifdef IPV6_RECVPATHMTU
2162         case IPV6_RECVPATHMTU:
2163 #endif
2164 #ifdef IPV6_TRANSPARENT
2165         case IPV6_TRANSPARENT:
2166 #endif
2167 #ifdef IPV6_FREEBIND
2168         case IPV6_FREEBIND:
2169 #endif
2170 #ifdef IPV6_RECVORIGDSTADDR
2171         case IPV6_RECVORIGDSTADDR:
2172 #endif
2173             val = 0;
2174             if (optlen < sizeof(uint32_t)) {
2175                 return -TARGET_EINVAL;
2176             }
2177             if (get_user_u32(val, optval_addr)) {
2178                 return -TARGET_EFAULT;
2179             }
2180             ret = get_errno(setsockopt(sockfd, level, optname,
2181                                        &val, sizeof(val)));
2182             break;
2183         case IPV6_PKTINFO:
2184         {
2185             struct in6_pktinfo pki;
2186 
2187             if (optlen < sizeof(pki)) {
2188                 return -TARGET_EINVAL;
2189             }
2190 
2191             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2192                 return -TARGET_EFAULT;
2193             }
2194 
2195             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2196 
2197             ret = get_errno(setsockopt(sockfd, level, optname,
2198                                        &pki, sizeof(pki)));
2199             break;
2200         }
2201         case IPV6_ADD_MEMBERSHIP:
2202         case IPV6_DROP_MEMBERSHIP:
2203         {
2204             struct ipv6_mreq ipv6mreq;
2205 
2206             if (optlen < sizeof(ipv6mreq)) {
2207                 return -TARGET_EINVAL;
2208             }
2209 
2210             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2211                 return -TARGET_EFAULT;
2212             }
2213 
2214             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2215 
2216             ret = get_errno(setsockopt(sockfd, level, optname,
2217                                        &ipv6mreq, sizeof(ipv6mreq)));
2218             break;
2219         }
2220         default:
2221             goto unimplemented;
2222         }
2223         break;
2224     case SOL_ICMPV6:
2225         switch (optname) {
2226         case ICMPV6_FILTER:
2227         {
2228             struct icmp6_filter icmp6f;
2229 
2230             if (optlen > sizeof(icmp6f)) {
2231                 optlen = sizeof(icmp6f);
2232             }
2233 
2234             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2235                 return -TARGET_EFAULT;
2236             }
2237 
2238             for (val = 0; val < 8; val++) {
2239                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2240             }
2241 
2242             ret = get_errno(setsockopt(sockfd, level, optname,
2243                                        &icmp6f, optlen));
2244             break;
2245         }
2246         default:
2247             goto unimplemented;
2248         }
2249         break;
2250     case SOL_RAW:
2251         switch (optname) {
2252         case ICMP_FILTER:
2253         case IPV6_CHECKSUM:
2254             /* these take a u32 value */
2255             if (optlen < sizeof(uint32_t)) {
2256                 return -TARGET_EINVAL;
2257             }
2258 
2259             if (get_user_u32(val, optval_addr)) {
2260                 return -TARGET_EFAULT;
2261             }
2262             ret = get_errno(setsockopt(sockfd, level, optname,
2263                                        &val, sizeof(val)));
2264             break;
2265 
2266         default:
2267             goto unimplemented;
2268         }
2269         break;
2270 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2271     case SOL_ALG:
2272         switch (optname) {
2273         case ALG_SET_KEY:
2274         {
2275             char *alg_key = g_try_malloc(optlen);
2276 
2277             if (!alg_key) {
2278                 return -TARGET_ENOMEM;
2279             }
2280             if (copy_from_user(alg_key, optval_addr, optlen)) {
2281                 g_free(alg_key);
2282                 return -TARGET_EFAULT;
2283             }
2284             ret = get_errno(setsockopt(sockfd, level, optname,
2285                                        alg_key, optlen));
2286             g_free(alg_key);
2287             break;
2288         }
2289         case ALG_SET_AEAD_AUTHSIZE:
2290         {
2291             ret = get_errno(setsockopt(sockfd, level, optname,
2292                                        NULL, optlen));
2293             break;
2294         }
2295         default:
2296             goto unimplemented;
2297         }
2298         break;
2299 #endif
2300     case TARGET_SOL_SOCKET:
2301         switch (optname) {
2302         case TARGET_SO_RCVTIMEO:
2303         {
2304                 struct timeval tv;
2305 
2306                 optname = SO_RCVTIMEO;
2307 
2308 set_timeout:
2309                 if (optlen != sizeof(struct target_timeval)) {
2310                     return -TARGET_EINVAL;
2311                 }
2312 
2313                 if (copy_from_user_timeval(&tv, optval_addr)) {
2314                     return -TARGET_EFAULT;
2315                 }
2316 
2317                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2318                                 &tv, sizeof(tv)));
2319                 return ret;
2320         }
2321         case TARGET_SO_SNDTIMEO:
2322                 optname = SO_SNDTIMEO;
2323                 goto set_timeout;
2324         case TARGET_SO_ATTACH_FILTER:
2325         {
2326                 struct target_sock_fprog *tfprog;
2327                 struct target_sock_filter *tfilter;
2328                 struct sock_fprog fprog;
2329                 struct sock_filter *filter;
2330                 int i;
2331 
2332                 if (optlen != sizeof(*tfprog)) {
2333                     return -TARGET_EINVAL;
2334                 }
2335                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2336                     return -TARGET_EFAULT;
2337                 }
2338                 if (!lock_user_struct(VERIFY_READ, tfilter,
2339                                       tswapal(tfprog->filter), 0)) {
2340                     unlock_user_struct(tfprog, optval_addr, 1);
2341                     return -TARGET_EFAULT;
2342                 }
2343 
2344                 fprog.len = tswap16(tfprog->len);
2345                 filter = g_try_new(struct sock_filter, fprog.len);
2346                 if (filter == NULL) {
2347                     unlock_user_struct(tfilter, tfprog->filter, 1);
2348                     unlock_user_struct(tfprog, optval_addr, 1);
2349                     return -TARGET_ENOMEM;
2350                 }
2351                 for (i = 0; i < fprog.len; i++) {
2352                     filter[i].code = tswap16(tfilter[i].code);
2353                     filter[i].jt = tfilter[i].jt;
2354                     filter[i].jf = tfilter[i].jf;
2355                     filter[i].k = tswap32(tfilter[i].k);
2356                 }
2357                 fprog.filter = filter;
2358 
2359                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2360                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2361                 g_free(filter);
2362 
2363                 unlock_user_struct(tfilter, tfprog->filter, 1);
2364                 unlock_user_struct(tfprog, optval_addr, 1);
2365                 return ret;
2366         }
2367         case TARGET_SO_BINDTODEVICE:
2368         {
2369                 char *dev_ifname, *addr_ifname;
2370 
2371                 if (optlen > IFNAMSIZ - 1) {
2372                     optlen = IFNAMSIZ - 1;
2373                 }
2374                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2375                 if (!dev_ifname) {
2376                     return -TARGET_EFAULT;
2377                 }
2378                 optname = SO_BINDTODEVICE;
2379                 addr_ifname = alloca(IFNAMSIZ);
2380                 memcpy(addr_ifname, dev_ifname, optlen);
2381                 addr_ifname[optlen] = 0;
2382                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2383                                            addr_ifname, optlen));
2384                 unlock_user(dev_ifname, optval_addr, 0);
2385                 return ret;
2386         }
2387         case TARGET_SO_LINGER:
2388         {
2389                 struct linger lg;
2390                 struct target_linger *tlg;
2391 
2392                 if (optlen != sizeof(struct target_linger)) {
2393                     return -TARGET_EINVAL;
2394                 }
2395                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2396                     return -TARGET_EFAULT;
2397                 }
2398                 __get_user(lg.l_onoff, &tlg->l_onoff);
2399                 __get_user(lg.l_linger, &tlg->l_linger);
2400                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2401                                 &lg, sizeof(lg)));
2402                 unlock_user_struct(tlg, optval_addr, 0);
2403                 return ret;
2404         }
2405             /* Options with 'int' argument.  */
2406         case TARGET_SO_DEBUG:
2407                 optname = SO_DEBUG;
2408                 break;
2409         case TARGET_SO_REUSEADDR:
2410                 optname = SO_REUSEADDR;
2411                 break;
2412 #ifdef SO_REUSEPORT
2413         case TARGET_SO_REUSEPORT:
2414                 optname = SO_REUSEPORT;
2415                 break;
2416 #endif
2417         case TARGET_SO_TYPE:
2418                 optname = SO_TYPE;
2419                 break;
2420         case TARGET_SO_ERROR:
2421                 optname = SO_ERROR;
2422                 break;
2423         case TARGET_SO_DONTROUTE:
2424                 optname = SO_DONTROUTE;
2425                 break;
2426         case TARGET_SO_BROADCAST:
2427                 optname = SO_BROADCAST;
2428                 break;
2429         case TARGET_SO_SNDBUF:
2430                 optname = SO_SNDBUF;
2431                 break;
2432         case TARGET_SO_SNDBUFFORCE:
2433                 optname = SO_SNDBUFFORCE;
2434                 break;
2435         case TARGET_SO_RCVBUF:
2436                 optname = SO_RCVBUF;
2437                 break;
2438         case TARGET_SO_RCVBUFFORCE:
2439                 optname = SO_RCVBUFFORCE;
2440                 break;
2441         case TARGET_SO_KEEPALIVE:
2442                 optname = SO_KEEPALIVE;
2443                 break;
2444         case TARGET_SO_OOBINLINE:
2445                 optname = SO_OOBINLINE;
2446                 break;
2447         case TARGET_SO_NO_CHECK:
2448                 optname = SO_NO_CHECK;
2449                 break;
2450         case TARGET_SO_PRIORITY:
2451                 optname = SO_PRIORITY;
2452                 break;
2453 #ifdef SO_BSDCOMPAT
2454         case TARGET_SO_BSDCOMPAT:
2455                 optname = SO_BSDCOMPAT;
2456                 break;
2457 #endif
2458         case TARGET_SO_PASSCRED:
2459                 optname = SO_PASSCRED;
2460                 break;
2461         case TARGET_SO_PASSSEC:
2462                 optname = SO_PASSSEC;
2463                 break;
2464         case TARGET_SO_TIMESTAMP:
2465                 optname = SO_TIMESTAMP;
2466                 break;
2467         case TARGET_SO_RCVLOWAT:
2468                 optname = SO_RCVLOWAT;
2469                 break;
2470         default:
2471             goto unimplemented;
2472         }
2473         if (optlen < sizeof(uint32_t))
2474             return -TARGET_EINVAL;
2475 
2476         if (get_user_u32(val, optval_addr))
2477             return -TARGET_EFAULT;
2478         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2479         break;
2480 #ifdef SOL_NETLINK
2481     case SOL_NETLINK:
2482         switch (optname) {
2483         case NETLINK_PKTINFO:
2484         case NETLINK_ADD_MEMBERSHIP:
2485         case NETLINK_DROP_MEMBERSHIP:
2486         case NETLINK_BROADCAST_ERROR:
2487         case NETLINK_NO_ENOBUFS:
2488 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2489         case NETLINK_LISTEN_ALL_NSID:
2490         case NETLINK_CAP_ACK:
2491 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2492 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2493         case NETLINK_EXT_ACK:
2494 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2496         case NETLINK_GET_STRICT_CHK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2498             break;
2499         default:
2500             goto unimplemented;
2501         }
2502         val = 0;
2503         if (optlen < sizeof(uint32_t)) {
2504             return -TARGET_EINVAL;
2505         }
2506         if (get_user_u32(val, optval_addr)) {
2507             return -TARGET_EFAULT;
2508         }
2509         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2510                                    sizeof(val)));
2511         break;
2512 #endif /* SOL_NETLINK */
2513     default:
2514     unimplemented:
2515         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2516                       level, optname);
2517         ret = -TARGET_ENOPROTOOPT;
2518     }
2519     return ret;
2520 }
2521 
2522 /* do_getsockopt() Must return target values and target errnos. */
2523 static abi_long do_getsockopt(int sockfd, int level, int optname,
2524                               abi_ulong optval_addr, abi_ulong optlen)
2525 {
2526     abi_long ret;
2527     int len, val;
2528     socklen_t lv;
2529 
2530     switch(level) {
2531     case TARGET_SOL_SOCKET:
2532         level = SOL_SOCKET;
2533         switch (optname) {
2534         /* These don't just return a single integer */
2535         case TARGET_SO_PEERNAME:
2536             goto unimplemented;
2537         case TARGET_SO_RCVTIMEO: {
2538             struct timeval tv;
2539             socklen_t tvlen;
2540 
2541             optname = SO_RCVTIMEO;
2542 
2543 get_timeout:
2544             if (get_user_u32(len, optlen)) {
2545                 return -TARGET_EFAULT;
2546             }
2547             if (len < 0) {
2548                 return -TARGET_EINVAL;
2549             }
2550 
2551             tvlen = sizeof(tv);
2552             ret = get_errno(getsockopt(sockfd, level, optname,
2553                                        &tv, &tvlen));
2554             if (ret < 0) {
2555                 return ret;
2556             }
2557             if (len > sizeof(struct target_timeval)) {
2558                 len = sizeof(struct target_timeval);
2559             }
2560             if (copy_to_user_timeval(optval_addr, &tv)) {
2561                 return -TARGET_EFAULT;
2562             }
2563             if (put_user_u32(len, optlen)) {
2564                 return -TARGET_EFAULT;
2565             }
2566             break;
2567         }
2568         case TARGET_SO_SNDTIMEO:
2569             optname = SO_SNDTIMEO;
2570             goto get_timeout;
2571         case TARGET_SO_PEERCRED: {
2572             struct ucred cr;
2573             socklen_t crlen;
2574             struct target_ucred *tcr;
2575 
2576             if (get_user_u32(len, optlen)) {
2577                 return -TARGET_EFAULT;
2578             }
2579             if (len < 0) {
2580                 return -TARGET_EINVAL;
2581             }
2582 
2583             crlen = sizeof(cr);
2584             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2585                                        &cr, &crlen));
2586             if (ret < 0) {
2587                 return ret;
2588             }
2589             if (len > crlen) {
2590                 len = crlen;
2591             }
2592             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2593                 return -TARGET_EFAULT;
2594             }
2595             __put_user(cr.pid, &tcr->pid);
2596             __put_user(cr.uid, &tcr->uid);
2597             __put_user(cr.gid, &tcr->gid);
2598             unlock_user_struct(tcr, optval_addr, 1);
2599             if (put_user_u32(len, optlen)) {
2600                 return -TARGET_EFAULT;
2601             }
2602             break;
2603         }
2604         case TARGET_SO_PEERSEC: {
2605             char *name;
2606 
2607             if (get_user_u32(len, optlen)) {
2608                 return -TARGET_EFAULT;
2609             }
2610             if (len < 0) {
2611                 return -TARGET_EINVAL;
2612             }
2613             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2614             if (!name) {
2615                 return -TARGET_EFAULT;
2616             }
2617             lv = len;
2618             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2619                                        name, &lv));
2620             if (put_user_u32(lv, optlen)) {
2621                 ret = -TARGET_EFAULT;
2622             }
2623             unlock_user(name, optval_addr, lv);
2624             break;
2625         }
2626         case TARGET_SO_LINGER:
2627         {
2628             struct linger lg;
2629             socklen_t lglen;
2630             struct target_linger *tlg;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             lglen = sizeof(lg);
2640             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2641                                        &lg, &lglen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > lglen) {
2646                 len = lglen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(lg.l_onoff, &tlg->l_onoff);
2652             __put_user(lg.l_linger, &tlg->l_linger);
2653             unlock_user_struct(tlg, optval_addr, 1);
2654             if (put_user_u32(len, optlen)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             break;
2658         }
2659         /* Options with 'int' argument.  */
2660         case TARGET_SO_DEBUG:
2661             optname = SO_DEBUG;
2662             goto int_case;
2663         case TARGET_SO_REUSEADDR:
2664             optname = SO_REUSEADDR;
2665             goto int_case;
2666 #ifdef SO_REUSEPORT
2667         case TARGET_SO_REUSEPORT:
2668             optname = SO_REUSEPORT;
2669             goto int_case;
2670 #endif
2671         case TARGET_SO_TYPE:
2672             optname = SO_TYPE;
2673             goto int_case;
2674         case TARGET_SO_ERROR:
2675             optname = SO_ERROR;
2676             goto int_case;
2677         case TARGET_SO_DONTROUTE:
2678             optname = SO_DONTROUTE;
2679             goto int_case;
2680         case TARGET_SO_BROADCAST:
2681             optname = SO_BROADCAST;
2682             goto int_case;
2683         case TARGET_SO_SNDBUF:
2684             optname = SO_SNDBUF;
2685             goto int_case;
2686         case TARGET_SO_RCVBUF:
2687             optname = SO_RCVBUF;
2688             goto int_case;
2689         case TARGET_SO_KEEPALIVE:
2690             optname = SO_KEEPALIVE;
2691             goto int_case;
2692         case TARGET_SO_OOBINLINE:
2693             optname = SO_OOBINLINE;
2694             goto int_case;
2695         case TARGET_SO_NO_CHECK:
2696             optname = SO_NO_CHECK;
2697             goto int_case;
2698         case TARGET_SO_PRIORITY:
2699             optname = SO_PRIORITY;
2700             goto int_case;
2701 #ifdef SO_BSDCOMPAT
2702         case TARGET_SO_BSDCOMPAT:
2703             optname = SO_BSDCOMPAT;
2704             goto int_case;
2705 #endif
2706         case TARGET_SO_PASSCRED:
2707             optname = SO_PASSCRED;
2708             goto int_case;
2709         case TARGET_SO_TIMESTAMP:
2710             optname = SO_TIMESTAMP;
2711             goto int_case;
2712         case TARGET_SO_RCVLOWAT:
2713             optname = SO_RCVLOWAT;
2714             goto int_case;
2715         case TARGET_SO_ACCEPTCONN:
2716             optname = SO_ACCEPTCONN;
2717             goto int_case;
2718         case TARGET_SO_PROTOCOL:
2719             optname = SO_PROTOCOL;
2720             goto int_case;
2721         case TARGET_SO_DOMAIN:
2722             optname = SO_DOMAIN;
2723             goto int_case;
2724         default:
2725             goto int_case;
2726         }
2727         break;
2728     case SOL_TCP:
2729     case SOL_UDP:
2730         /* TCP and UDP options all take an 'int' value.  */
2731     int_case:
2732         if (get_user_u32(len, optlen))
2733             return -TARGET_EFAULT;
2734         if (len < 0)
2735             return -TARGET_EINVAL;
2736         lv = sizeof(lv);
2737         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2738         if (ret < 0)
2739             return ret;
2740         if (optname == SO_TYPE) {
2741             val = host_to_target_sock_type(val);
2742         }
2743         if (len > lv)
2744             len = lv;
2745         if (len == 4) {
2746             if (put_user_u32(val, optval_addr))
2747                 return -TARGET_EFAULT;
2748         } else {
2749             if (put_user_u8(val, optval_addr))
2750                 return -TARGET_EFAULT;
2751         }
2752         if (put_user_u32(len, optlen))
2753             return -TARGET_EFAULT;
2754         break;
2755     case SOL_IP:
2756         switch(optname) {
2757         case IP_TOS:
2758         case IP_TTL:
2759         case IP_HDRINCL:
2760         case IP_ROUTER_ALERT:
2761         case IP_RECVOPTS:
2762         case IP_RETOPTS:
2763         case IP_PKTINFO:
2764         case IP_MTU_DISCOVER:
2765         case IP_RECVERR:
2766         case IP_RECVTOS:
2767 #ifdef IP_FREEBIND
2768         case IP_FREEBIND:
2769 #endif
2770         case IP_MULTICAST_TTL:
2771         case IP_MULTICAST_LOOP:
2772             if (get_user_u32(len, optlen))
2773                 return -TARGET_EFAULT;
2774             if (len < 0)
2775                 return -TARGET_EINVAL;
2776             lv = sizeof(lv);
2777             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2778             if (ret < 0)
2779                 return ret;
2780             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2781                 len = 1;
2782                 if (put_user_u32(len, optlen)
2783                     || put_user_u8(val, optval_addr))
2784                     return -TARGET_EFAULT;
2785             } else {
2786                 if (len > sizeof(int))
2787                     len = sizeof(int);
2788                 if (put_user_u32(len, optlen)
2789                     || put_user_u32(val, optval_addr))
2790                     return -TARGET_EFAULT;
2791             }
2792             break;
2793         default:
2794             ret = -TARGET_ENOPROTOOPT;
2795             break;
2796         }
2797         break;
2798     case SOL_IPV6:
2799         switch (optname) {
2800         case IPV6_MTU_DISCOVER:
2801         case IPV6_MTU:
2802         case IPV6_V6ONLY:
2803         case IPV6_RECVPKTINFO:
2804         case IPV6_UNICAST_HOPS:
2805         case IPV6_MULTICAST_HOPS:
2806         case IPV6_MULTICAST_LOOP:
2807         case IPV6_RECVERR:
2808         case IPV6_RECVHOPLIMIT:
2809         case IPV6_2292HOPLIMIT:
2810         case IPV6_CHECKSUM:
2811         case IPV6_ADDRFORM:
2812         case IPV6_2292PKTINFO:
2813         case IPV6_RECVTCLASS:
2814         case IPV6_RECVRTHDR:
2815         case IPV6_2292RTHDR:
2816         case IPV6_RECVHOPOPTS:
2817         case IPV6_2292HOPOPTS:
2818         case IPV6_RECVDSTOPTS:
2819         case IPV6_2292DSTOPTS:
2820         case IPV6_TCLASS:
2821         case IPV6_ADDR_PREFERENCES:
2822 #ifdef IPV6_RECVPATHMTU
2823         case IPV6_RECVPATHMTU:
2824 #endif
2825 #ifdef IPV6_TRANSPARENT
2826         case IPV6_TRANSPARENT:
2827 #endif
2828 #ifdef IPV6_FREEBIND
2829         case IPV6_FREEBIND:
2830 #endif
2831 #ifdef IPV6_RECVORIGDSTADDR
2832         case IPV6_RECVORIGDSTADDR:
2833 #endif
2834             if (get_user_u32(len, optlen))
2835                 return -TARGET_EFAULT;
2836             if (len < 0)
2837                 return -TARGET_EINVAL;
2838             lv = sizeof(lv);
2839             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2840             if (ret < 0)
2841                 return ret;
2842             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2843                 len = 1;
2844                 if (put_user_u32(len, optlen)
2845                     || put_user_u8(val, optval_addr))
2846                     return -TARGET_EFAULT;
2847             } else {
2848                 if (len > sizeof(int))
2849                     len = sizeof(int);
2850                 if (put_user_u32(len, optlen)
2851                     || put_user_u32(val, optval_addr))
2852                     return -TARGET_EFAULT;
2853             }
2854             break;
2855         default:
2856             ret = -TARGET_ENOPROTOOPT;
2857             break;
2858         }
2859         break;
2860 #ifdef SOL_NETLINK
2861     case SOL_NETLINK:
2862         switch (optname) {
2863         case NETLINK_PKTINFO:
2864         case NETLINK_BROADCAST_ERROR:
2865         case NETLINK_NO_ENOBUFS:
2866 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2867         case NETLINK_LISTEN_ALL_NSID:
2868         case NETLINK_CAP_ACK:
2869 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2870 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2871         case NETLINK_EXT_ACK:
2872 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2873 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2874         case NETLINK_GET_STRICT_CHK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2876             if (get_user_u32(len, optlen)) {
2877                 return -TARGET_EFAULT;
2878             }
2879             if (len != sizeof(val)) {
2880                 return -TARGET_EINVAL;
2881             }
2882             lv = len;
2883             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2884             if (ret < 0) {
2885                 return ret;
2886             }
2887             if (put_user_u32(lv, optlen)
2888                 || put_user_u32(val, optval_addr)) {
2889                 return -TARGET_EFAULT;
2890             }
2891             break;
2892 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2893         case NETLINK_LIST_MEMBERSHIPS:
2894         {
2895             uint32_t *results;
2896             int i;
2897             if (get_user_u32(len, optlen)) {
2898                 return -TARGET_EFAULT;
2899             }
2900             if (len < 0) {
2901                 return -TARGET_EINVAL;
2902             }
2903             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2904             if (!results && len > 0) {
2905                 return -TARGET_EFAULT;
2906             }
2907             lv = len;
2908             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2909             if (ret < 0) {
2910                 unlock_user(results, optval_addr, 0);
2911                 return ret;
2912             }
2913             /* swap host endianness to target endianness. */
2914             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2915                 results[i] = tswap32(results[i]);
2916             }
2917             if (put_user_u32(lv, optlen)) {
2918                 return -TARGET_EFAULT;
2919             }
2920             unlock_user(results, optval_addr, 0);
2921             break;
2922         }
2923 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2924         default:
2925             goto unimplemented;
2926         }
2927         break;
2928 #endif /* SOL_NETLINK */
2929     default:
2930     unimplemented:
2931         qemu_log_mask(LOG_UNIMP,
2932                       "getsockopt level=%d optname=%d not yet supported\n",
2933                       level, optname);
2934         ret = -TARGET_EOPNOTSUPP;
2935         break;
2936     }
2937     return ret;
2938 }
2939 
2940 /* Convert target low/high pair representing file offset into the host
2941  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2942  * as the kernel doesn't handle them either.
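 * For example, with a 32-bit target on a 64-bit host, tlow = 0x89abcdef and
 * thigh = 0x01234567 combine into off = 0x0123456789abcdef, so *hlow gets
 * the full 64-bit value and *hhigh ends up 0.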
2943  */
2944 static void target_to_host_low_high(abi_ulong tlow,
2945                                     abi_ulong thigh,
2946                                     unsigned long *hlow,
2947                                     unsigned long *hhigh)
2948 {
2949     uint64_t off = tlow |
2950         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2951         TARGET_LONG_BITS / 2;
2952 
2953     *hlow = off;
2954     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2955 }
2956 
2957 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2958                                 abi_ulong count, int copy)
2959 {
2960     struct target_iovec *target_vec;
2961     struct iovec *vec;
2962     abi_ulong total_len, max_len;
2963     int i;
2964     int err = 0;
2965     bool bad_address = false;
2966 
2967     if (count == 0) {
2968         errno = 0;
2969         return NULL;
2970     }
2971     if (count > IOV_MAX) {
2972         errno = EINVAL;
2973         return NULL;
2974     }
2975 
2976     vec = g_try_new0(struct iovec, count);
2977     if (vec == NULL) {
2978         errno = ENOMEM;
2979         return NULL;
2980     }
2981 
2982     target_vec = lock_user(VERIFY_READ, target_addr,
2983                            count * sizeof(struct target_iovec), 1);
2984     if (target_vec == NULL) {
2985         err = EFAULT;
2986         goto fail2;
2987     }
2988 
2989     /* ??? If host page size > target page size, this will result in a
2990        value larger than what we can actually support.  */
2991     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2992     total_len = 0;
2993 
2994     for (i = 0; i < count; i++) {
2995         abi_ulong base = tswapal(target_vec[i].iov_base);
2996         abi_long len = tswapal(target_vec[i].iov_len);
2997 
2998         if (len < 0) {
2999             err = EINVAL;
3000             goto fail;
3001         } else if (len == 0) {
3002             /* Zero length pointer is ignored.  */
3003             vec[i].iov_base = 0;
3004         } else {
3005             vec[i].iov_base = lock_user(type, base, len, copy);
3006             /* If the first buffer pointer is bad, this is a fault.  But
3007              * subsequent bad buffers will result in a partial write; this
3008              * is realized by filling the vector with null pointers and
3009              * zero lengths. */
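            /* For example, a guest writev() whose second iovec points at
             * unmapped memory transfers only the first buffer's bytes. */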
3010             if (!vec[i].iov_base) {
3011                 if (i == 0) {
3012                     err = EFAULT;
3013                     goto fail;
3014                 } else {
3015                     bad_address = true;
3016                 }
3017             }
3018             if (bad_address) {
3019                 len = 0;
3020             }
3021             if (len > max_len - total_len) {
3022                 len = max_len - total_len;
3023             }
3024         }
3025         vec[i].iov_len = len;
3026         total_len += len;
3027     }
3028 
3029     unlock_user(target_vec, target_addr, 0);
3030     return vec;
3031 
3032  fail:
3033     while (--i >= 0) {
3034         if (tswapal(target_vec[i].iov_len) > 0) {
3035             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3036         }
3037     }
3038     unlock_user(target_vec, target_addr, 0);
3039  fail2:
3040     g_free(vec);
3041     errno = err;
3042     return NULL;
3043 }
3044 
3045 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3046                          abi_ulong count, int copy)
3047 {
3048     struct target_iovec *target_vec;
3049     int i;
3050 
3051     target_vec = lock_user(VERIFY_READ, target_addr,
3052                            count * sizeof(struct target_iovec), 1);
3053     if (target_vec) {
3054         for (i = 0; i < count; i++) {
3055             abi_ulong base = tswapal(target_vec[i].iov_base);
3056             abi_long len = tswapal(target_vec[i].iov_len);
3057             if (len < 0) {
3058                 break;
3059             }
3060             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3061         }
3062         unlock_user(target_vec, target_addr, 0);
3063     }
3064 
3065     g_free(vec);
3066 }
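
/*
 * Illustrative sketch, not part of the original file: how a caller such as
 * the readv emulation is expected to pair lock_iovec()/unlock_iovec().  The
 * helper name is hypothetical; safe_readv() is assumed to be the
 * signal-safe wrapper defined earlier in this file.
 */
static inline abi_long example_do_readv(int fd, abi_ulong vptr, abi_ulong vlen)
{
    /* VERIFY_WRITE + copy=0: the guest buffers will be written, not read. */
    struct iovec *vec = lock_iovec(VERIFY_WRITE, vptr, vlen, 0);
    abi_long ret;

    if (vec == NULL) {
        /* lock_iovec() reports its failure through the host errno. */
        return -host_to_target_errno(errno);
    }
    ret = get_errno(safe_readv(fd, vec, vlen));
    /* copy=1: copy what the host read back into guest memory. */
    unlock_iovec(vec, vptr, vlen, 1);
    return ret;
}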
3067 
3068 static inline int target_to_host_sock_type(int *type)
3069 {
3070     int host_type = 0;
3071     int target_type = *type;
3072 
3073     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3074     case TARGET_SOCK_DGRAM:
3075         host_type = SOCK_DGRAM;
3076         break;
3077     case TARGET_SOCK_STREAM:
3078         host_type = SOCK_STREAM;
3079         break;
3080     default:
3081         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3082         break;
3083     }
3084     if (target_type & TARGET_SOCK_CLOEXEC) {
3085 #if defined(SOCK_CLOEXEC)
3086         host_type |= SOCK_CLOEXEC;
3087 #else
3088         return -TARGET_EINVAL;
3089 #endif
3090     }
3091     if (target_type & TARGET_SOCK_NONBLOCK) {
3092 #if defined(SOCK_NONBLOCK)
3093         host_type |= SOCK_NONBLOCK;
3094 #elif !defined(O_NONBLOCK)
3095         return -TARGET_EINVAL;
3096 #endif
3097     }
3098     *type = host_type;
3099     return 0;
3100 }
3101 
3102 /* Try to emulate socket type flags after socket creation.  */
3103 static int sock_flags_fixup(int fd, int target_type)
3104 {
3105 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3106     if (target_type & TARGET_SOCK_NONBLOCK) {
3107         int flags = fcntl(fd, F_GETFL);
3108         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3109             close(fd);
3110             return -TARGET_EINVAL;
3111         }
3112     }
3113 #endif
3114     return fd;
3115 }
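
/*
 * Taken together, target_to_host_sock_type() and sock_flags_fixup() handle
 * the SOCK_CLOEXEC/SOCK_NONBLOCK type flags: the former maps them to host
 * flags where the host supports them (and rejects SOCK_CLOEXEC with EINVAL
 * when it cannot be honoured), while the latter falls back to setting
 * O_NONBLOCK via fcntl() after socket creation on hosts without
 * SOCK_NONBLOCK.  For example, a guest socket(AF_INET,
 * SOCK_STREAM | SOCK_NONBLOCK, 0) ends up as a host SOCK_STREAM socket made
 * non-blocking either at creation time or by the fcntl() above.
 */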
3116 
3117 /* do_socket() Must return target values and target errnos. */
3118 static abi_long do_socket(int domain, int type, int protocol)
3119 {
3120     int target_type = type;
3121     int ret;
3122 
3123     ret = target_to_host_sock_type(&type);
3124     if (ret) {
3125         return ret;
3126     }
3127 
3128     if (domain == PF_NETLINK && !(
3129 #ifdef CONFIG_RTNETLINK
3130          protocol == NETLINK_ROUTE ||
3131 #endif
3132          protocol == NETLINK_KOBJECT_UEVENT ||
3133          protocol == NETLINK_AUDIT)) {
3134         return -TARGET_EPROTONOSUPPORT;
3135     }
3136 
3137     if (domain == AF_PACKET ||
3138         (domain == AF_INET && type == SOCK_PACKET)) {
3139         protocol = tswap16(protocol);
3140     }
3141 
3142     ret = get_errno(socket(domain, type, protocol));
3143     if (ret >= 0) {
3144         ret = sock_flags_fixup(ret, target_type);
3145         if (type == SOCK_PACKET) {
3146             /* Handle an obsolete case: if the socket type is
3147              * SOCK_PACKET, bind by name.
3148              */
3149             fd_trans_register(ret, &target_packet_trans);
3150         } else if (domain == PF_NETLINK) {
3151             switch (protocol) {
3152 #ifdef CONFIG_RTNETLINK
3153             case NETLINK_ROUTE:
3154                 fd_trans_register(ret, &target_netlink_route_trans);
3155                 break;
3156 #endif
3157             case NETLINK_KOBJECT_UEVENT:
3158                 /* nothing to do: messages are strings */
3159                 break;
3160             case NETLINK_AUDIT:
3161                 fd_trans_register(ret, &target_netlink_audit_trans);
3162                 break;
3163             default:
3164                 g_assert_not_reached();
3165             }
3166         }
3167     }
3168     return ret;
3169 }
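
/*
 * The fd_trans_register() calls above attach per-descriptor translator hooks
 * to the new socket.  Later send/recv paths in this file (the
 * fd_trans_target_to_host_data()/fd_trans_host_to_target_data() callers in
 * do_sendto(), do_recvfrom() and do_sendrecvmsg_locked()) use those hooks to
 * convert protocol payloads such as netlink messages between guest and host
 * layout.
 */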
3170 
3171 /* do_bind() Must return target values and target errnos. */
3172 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3173                         socklen_t addrlen)
3174 {
3175     void *addr;
3176     abi_long ret;
3177 
3178     if ((int)addrlen < 0) {
3179         return -TARGET_EINVAL;
3180     }
3181 
3182     addr = alloca(addrlen+1);
3183 
3184     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3185     if (ret)
3186         return ret;
3187 
3188     return get_errno(bind(sockfd, addr, addrlen));
3189 }
3190 
3191 /* do_connect() Must return target values and target errnos. */
3192 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3193                            socklen_t addrlen)
3194 {
3195     void *addr;
3196     abi_long ret;
3197 
3198     if ((int)addrlen < 0) {
3199         return -TARGET_EINVAL;
3200     }
3201 
3202     addr = alloca(addrlen+1);
3203 
3204     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3205     if (ret)
3206         return ret;
3207 
3208     return get_errno(safe_connect(sockfd, addr, addrlen));
3209 }
3210 
3211 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3212 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3213                                       int flags, int send)
3214 {
3215     abi_long ret, len;
3216     struct msghdr msg;
3217     abi_ulong count;
3218     struct iovec *vec;
3219     abi_ulong target_vec;
3220 
3221     if (msgp->msg_name) {
3222         msg.msg_namelen = tswap32(msgp->msg_namelen);
3223         msg.msg_name = alloca(msg.msg_namelen+1);
3224         ret = target_to_host_sockaddr(fd, msg.msg_name,
3225                                       tswapal(msgp->msg_name),
3226                                       msg.msg_namelen);
3227         if (ret == -TARGET_EFAULT) {
3228             /* For connected sockets msg_name and msg_namelen must
3229              * be ignored, so returning EFAULT immediately is wrong.
3230              * Instead, pass a bad msg_name to the host kernel, and
3231              * let it decide whether to return EFAULT or not.
3232              */
3233             msg.msg_name = (void *)-1;
3234         } else if (ret) {
3235             goto out2;
3236         }
3237     } else {
3238         msg.msg_name = NULL;
3239         msg.msg_namelen = 0;
3240     }
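    /*
     * Note: host control messages can be larger than the target's (different
     * alignment, 64-bit payloads), so the host buffer is given twice the
     * guest's msg_controllen to leave room for the conversion done by
     * target_to_host_cmsg()/host_to_target_cmsg().
     */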
3241     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3242     msg.msg_control = alloca(msg.msg_controllen);
3243     memset(msg.msg_control, 0, msg.msg_controllen);
3244 
3245     msg.msg_flags = tswap32(msgp->msg_flags);
3246 
3247     count = tswapal(msgp->msg_iovlen);
3248     target_vec = tswapal(msgp->msg_iov);
3249 
3250     if (count > IOV_MAX) {
3251         /* sendmsg/recvmsg return a different errno for this condition than
3252          * readv/writev do, so we must catch it here before lock_iovec() does.
3253          */
3254         ret = -TARGET_EMSGSIZE;
3255         goto out2;
3256     }
3257 
3258     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3259                      target_vec, count, send);
3260     if (vec == NULL) {
3261         ret = -host_to_target_errno(errno);
3262         goto out2;
3263     }
3264     msg.msg_iovlen = count;
3265     msg.msg_iov = vec;
3266 
3267     if (send) {
3268         if (fd_trans_target_to_host_data(fd)) {
3269             void *host_msg;
3270 
3271             host_msg = g_malloc(msg.msg_iov->iov_len);
3272             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3273             ret = fd_trans_target_to_host_data(fd)(host_msg,
3274                                                    msg.msg_iov->iov_len);
3275             if (ret >= 0) {
3276                 msg.msg_iov->iov_base = host_msg;
3277                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3278             }
3279             g_free(host_msg);
3280         } else {
3281             ret = target_to_host_cmsg(&msg, msgp);
3282             if (ret == 0) {
3283                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3284             }
3285         }
3286     } else {
3287         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3288         if (!is_error(ret)) {
3289             len = ret;
3290             if (fd_trans_host_to_target_data(fd)) {
3291                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3292                                                MIN(msg.msg_iov->iov_len, len));
3293             } else {
3294                 ret = host_to_target_cmsg(msgp, &msg);
3295             }
3296             if (!is_error(ret)) {
3297                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3298                 msgp->msg_flags = tswap32(msg.msg_flags);
3299                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3300                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3301                                     msg.msg_name, msg.msg_namelen);
3302                     if (ret) {
3303                         goto out;
3304                     }
3305                 }
3306 
3307                 ret = len;
3308             }
3309         }
3310     }
3311 
3312 out:
3313     unlock_iovec(vec, target_vec, count, !send);
3314 out2:
3315     return ret;
3316 }
3317 
3318 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3319                                int flags, int send)
3320 {
3321     abi_long ret;
3322     struct target_msghdr *msgp;
3323 
3324     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3325                           msgp,
3326                           target_msg,
3327                           send ? 1 : 0)) {
3328         return -TARGET_EFAULT;
3329     }
3330     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3331     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3332     return ret;
3333 }
3334 
3335 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3336  * so it might not have this *mmsg-specific flag either.
3337  */
3338 #ifndef MSG_WAITFORONE
3339 #define MSG_WAITFORONE 0x10000
3340 #endif
3341 
3342 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3343                                 unsigned int vlen, unsigned int flags,
3344                                 int send)
3345 {
3346     struct target_mmsghdr *mmsgp;
3347     abi_long ret = 0;
3348     int i;
3349 
3350     if (vlen > UIO_MAXIOV) {
3351         vlen = UIO_MAXIOV;
3352     }
3353 
3354     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3355     if (!mmsgp) {
3356         return -TARGET_EFAULT;
3357     }
3358 
3359     for (i = 0; i < vlen; i++) {
3360         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3361         if (is_error(ret)) {
3362             break;
3363         }
3364         mmsgp[i].msg_len = tswap32(ret);
3365         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3366         if (flags & MSG_WAITFORONE) {
3367             flags |= MSG_DONTWAIT;
3368         }
3369     }
3370 
3371     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3372 
3373     /* Return number of datagrams sent if we sent any at all;
3374      * otherwise return the error.
3375      */
3376     if (i) {
3377         return i;
3378     }
3379     return ret;
3380 }
3381 
3382 /* do_accept4() Must return target values and target errnos. */
3383 static abi_long do_accept4(int fd, abi_ulong target_addr,
3384                            abi_ulong target_addrlen_addr, int flags)
3385 {
3386     socklen_t addrlen, ret_addrlen;
3387     void *addr;
3388     abi_long ret;
3389     int host_flags;
3390 
3391     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3392 
3393     if (target_addr == 0) {
3394         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3395     }
3396 
3397     /* linux returns EFAULT if addrlen pointer is invalid */
3398     if (get_user_u32(addrlen, target_addrlen_addr))
3399         return -TARGET_EFAULT;
3400 
3401     if ((int)addrlen < 0) {
3402         return -TARGET_EINVAL;
3403     }
3404 
3405     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3406         return -TARGET_EFAULT;
3407     }
3408 
3409     addr = alloca(addrlen);
3410 
3411     ret_addrlen = addrlen;
3412     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3413     if (!is_error(ret)) {
3414         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3415         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3416             ret = -TARGET_EFAULT;
3417         }
3418     }
3419     return ret;
3420 }
3421 
3422 /* do_getpeername() Must return target values and target errnos. */
3423 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3424                                abi_ulong target_addrlen_addr)
3425 {
3426     socklen_t addrlen, ret_addrlen;
3427     void *addr;
3428     abi_long ret;
3429 
3430     if (get_user_u32(addrlen, target_addrlen_addr))
3431         return -TARGET_EFAULT;
3432 
3433     if ((int)addrlen < 0) {
3434         return -TARGET_EINVAL;
3435     }
3436 
3437     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3438         return -TARGET_EFAULT;
3439     }
3440 
3441     addr = alloca(addrlen);
3442 
3443     ret_addrlen = addrlen;
3444     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3445     if (!is_error(ret)) {
3446         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3447         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3448             ret = -TARGET_EFAULT;
3449         }
3450     }
3451     return ret;
3452 }
3453 
3454 /* do_getsockname() Must return target values and target errnos. */
3455 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3456                                abi_ulong target_addrlen_addr)
3457 {
3458     socklen_t addrlen, ret_addrlen;
3459     void *addr;
3460     abi_long ret;
3461 
3462     if (get_user_u32(addrlen, target_addrlen_addr))
3463         return -TARGET_EFAULT;
3464 
3465     if ((int)addrlen < 0) {
3466         return -TARGET_EINVAL;
3467     }
3468 
3469     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3470         return -TARGET_EFAULT;
3471     }
3472 
3473     addr = alloca(addrlen);
3474 
3475     ret_addrlen = addrlen;
3476     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3477     if (!is_error(ret)) {
3478         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3479         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3480             ret = -TARGET_EFAULT;
3481         }
3482     }
3483     return ret;
3484 }
3485 
3486 /* do_socketpair() Must return target values and target errnos. */
3487 static abi_long do_socketpair(int domain, int type, int protocol,
3488                               abi_ulong target_tab_addr)
3489 {
3490     int tab[2];
3491     abi_long ret;
3492 
3493     target_to_host_sock_type(&type);
3494 
3495     ret = get_errno(socketpair(domain, type, protocol, tab));
3496     if (!is_error(ret)) {
3497         if (put_user_s32(tab[0], target_tab_addr)
3498             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3499             ret = -TARGET_EFAULT;
3500     }
3501     return ret;
3502 }
3503 
3504 /* do_sendto() Must return target values and target errnos. */
3505 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3506                           abi_ulong target_addr, socklen_t addrlen)
3507 {
3508     void *addr;
3509     void *host_msg;
3510     void *copy_msg = NULL;
3511     abi_long ret;
3512 
3513     if ((int)addrlen < 0) {
3514         return -TARGET_EINVAL;
3515     }
3516 
3517     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3518     if (!host_msg)
3519         return -TARGET_EFAULT;
3520     if (fd_trans_target_to_host_data(fd)) {
3521         copy_msg = host_msg;
3522         host_msg = g_malloc(len);
3523         memcpy(host_msg, copy_msg, len);
3524         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3525         if (ret < 0) {
3526             goto fail;
3527         }
3528     }
3529     if (target_addr) {
3530         addr = alloca(addrlen+1);
3531         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3532         if (ret) {
3533             goto fail;
3534         }
3535         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3536     } else {
3537         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3538     }
3539 fail:
3540     if (copy_msg) {
3541         g_free(host_msg);
3542         host_msg = copy_msg;
3543     }
3544     unlock_user(host_msg, msg, 0);
3545     return ret;
3546 }
3547 
3548 /* do_recvfrom() Must return target values and target errnos. */
3549 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3550                             abi_ulong target_addr,
3551                             abi_ulong target_addrlen)
3552 {
3553     socklen_t addrlen, ret_addrlen;
3554     void *addr;
3555     void *host_msg;
3556     abi_long ret;
3557 
3558     if (!msg) {
3559         host_msg = NULL;
3560     } else {
3561         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3562         if (!host_msg) {
3563             return -TARGET_EFAULT;
3564         }
3565     }
3566     if (target_addr) {
3567         if (get_user_u32(addrlen, target_addrlen)) {
3568             ret = -TARGET_EFAULT;
3569             goto fail;
3570         }
3571         if ((int)addrlen < 0) {
3572             ret = -TARGET_EINVAL;
3573             goto fail;
3574         }
3575         addr = alloca(addrlen);
3576         ret_addrlen = addrlen;
3577         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3578                                       addr, &ret_addrlen));
3579     } else {
3580         addr = NULL; /* To keep compiler quiet.  */
3581         addrlen = 0; /* To keep compiler quiet.  */
3582         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3583     }
3584     if (!is_error(ret)) {
3585         if (fd_trans_host_to_target_data(fd)) {
3586             abi_long trans;
3587             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3588             if (is_error(trans)) {
3589                 ret = trans;
3590                 goto fail;
3591             }
3592         }
3593         if (target_addr) {
3594             host_to_target_sockaddr(target_addr, addr,
3595                                     MIN(addrlen, ret_addrlen));
3596             if (put_user_u32(ret_addrlen, target_addrlen)) {
3597                 ret = -TARGET_EFAULT;
3598                 goto fail;
3599             }
3600         }
3601         unlock_user(host_msg, msg, len);
3602     } else {
3603 fail:
3604         unlock_user(host_msg, msg, 0);
3605     }
3606     return ret;
3607 }
3608 
3609 #ifdef TARGET_NR_socketcall
3610 /* do_socketcall() must return target values and target errnos. */
3611 static abi_long do_socketcall(int num, abi_ulong vptr)
3612 {
3613     static const unsigned nargs[] = { /* number of arguments per operation */
3614         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3615         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3616         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3617         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3618         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3619         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3620         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3621         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3622         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3623         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3624         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3625         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3626         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3627         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3628         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3629         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3630         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3631         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3632         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3633         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3634     };
3635     abi_long a[6]; /* max 6 args */
3636     unsigned i;
3637 
3638     /* check the range of the first argument num */
3639     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3640     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3641         return -TARGET_EINVAL;
3642     }
3643     /* ensure we have space for args */
3644     if (nargs[num] > ARRAY_SIZE(a)) {
3645         return -TARGET_EINVAL;
3646     }
3647     /* collect the arguments in a[] according to nargs[] */
3648     for (i = 0; i < nargs[num]; ++i) {
3649         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3650             return -TARGET_EFAULT;
3651         }
3652     }
3653     /* now that we have the args, invoke the appropriate underlying function */
3654     switch (num) {
3655     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3656         return do_socket(a[0], a[1], a[2]);
3657     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3658         return do_bind(a[0], a[1], a[2]);
3659     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3660         return do_connect(a[0], a[1], a[2]);
3661     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3662         return get_errno(listen(a[0], a[1]));
3663     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3664         return do_accept4(a[0], a[1], a[2], 0);
3665     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3666         return do_getsockname(a[0], a[1], a[2]);
3667     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3668         return do_getpeername(a[0], a[1], a[2]);
3669     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3670         return do_socketpair(a[0], a[1], a[2], a[3]);
3671     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3672         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3673     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3674         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3675     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3676         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3677     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3678         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3679     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3680         return get_errno(shutdown(a[0], a[1]));
3681     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3682         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3683     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3684         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3685     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3686         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3687     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3688         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3689     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3690         return do_accept4(a[0], a[1], a[2], a[3]);
3691     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3692         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3693     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3694         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3695     default:
3696         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3697         return -TARGET_EINVAL;
3698     }
3699 }
3700 #endif
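
/*
 * Worked example of the dispatch above (illustrative, not from the original
 * source): a 32-bit guest performing socket(AF_INET, SOCK_STREAM, 0) via
 * socketcall() passes num == TARGET_SYS_SOCKET and vptr pointing at three
 * abi_longs {AF_INET, SOCK_STREAM, 0}; do_socketcall() reads
 * nargs[TARGET_SYS_SOCKET] == 3 words from guest memory into a[] and then
 * calls do_socket(a[0], a[1], a[2]).
 */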
3701 
3702 #define N_SHM_REGIONS	32
3703 
3704 static struct shm_region {
3705     abi_ulong start;
3706     abi_ulong size;
3707     bool in_use;
3708 } shm_regions[N_SHM_REGIONS];
3709 
3710 #ifndef TARGET_SEMID64_DS
3711 /* asm-generic version of this struct */
3712 struct target_semid64_ds
3713 {
3714   struct target_ipc_perm sem_perm;
3715   abi_ulong sem_otime;
3716 #if TARGET_ABI_BITS == 32
3717   abi_ulong __unused1;
3718 #endif
3719   abi_ulong sem_ctime;
3720 #if TARGET_ABI_BITS == 32
3721   abi_ulong __unused2;
3722 #endif
3723   abi_ulong sem_nsems;
3724   abi_ulong __unused3;
3725   abi_ulong __unused4;
3726 };
3727 #endif
3728 
3729 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3730                                                abi_ulong target_addr)
3731 {
3732     struct target_ipc_perm *target_ip;
3733     struct target_semid64_ds *target_sd;
3734 
3735     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3736         return -TARGET_EFAULT;
3737     target_ip = &(target_sd->sem_perm);
3738     host_ip->__key = tswap32(target_ip->__key);
3739     host_ip->uid = tswap32(target_ip->uid);
3740     host_ip->gid = tswap32(target_ip->gid);
3741     host_ip->cuid = tswap32(target_ip->cuid);
3742     host_ip->cgid = tswap32(target_ip->cgid);
3743 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3744     host_ip->mode = tswap32(target_ip->mode);
3745 #else
3746     host_ip->mode = tswap16(target_ip->mode);
3747 #endif
3748 #if defined(TARGET_PPC)
3749     host_ip->__seq = tswap32(target_ip->__seq);
3750 #else
3751     host_ip->__seq = tswap16(target_ip->__seq);
3752 #endif
3753     unlock_user_struct(target_sd, target_addr, 0);
3754     return 0;
3755 }
3756 
3757 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3758                                                struct ipc_perm *host_ip)
3759 {
3760     struct target_ipc_perm *target_ip;
3761     struct target_semid64_ds *target_sd;
3762 
3763     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3764         return -TARGET_EFAULT;
3765     target_ip = &(target_sd->sem_perm);
3766     target_ip->__key = tswap32(host_ip->__key);
3767     target_ip->uid = tswap32(host_ip->uid);
3768     target_ip->gid = tswap32(host_ip->gid);
3769     target_ip->cuid = tswap32(host_ip->cuid);
3770     target_ip->cgid = tswap32(host_ip->cgid);
3771 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3772     target_ip->mode = tswap32(host_ip->mode);
3773 #else
3774     target_ip->mode = tswap16(host_ip->mode);
3775 #endif
3776 #if defined(TARGET_PPC)
3777     target_ip->__seq = tswap32(host_ip->__seq);
3778 #else
3779     target_ip->__seq = tswap16(host_ip->__seq);
3780 #endif
3781     unlock_user_struct(target_sd, target_addr, 1);
3782     return 0;
3783 }
3784 
3785 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3786                                                abi_ulong target_addr)
3787 {
3788     struct target_semid64_ds *target_sd;
3789 
3790     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3791         return -TARGET_EFAULT;
3792     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3793         return -TARGET_EFAULT;
3794     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3795     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3796     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3797     unlock_user_struct(target_sd, target_addr, 0);
3798     return 0;
3799 }
3800 
3801 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3802                                                struct semid_ds *host_sd)
3803 {
3804     struct target_semid64_ds *target_sd;
3805 
3806     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3807         return -TARGET_EFAULT;
3808     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3809         return -TARGET_EFAULT;
3810     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3811     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3812     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3813     unlock_user_struct(target_sd, target_addr, 1);
3814     return 0;
3815 }
3816 
3817 struct target_seminfo {
3818     int semmap;
3819     int semmni;
3820     int semmns;
3821     int semmnu;
3822     int semmsl;
3823     int semopm;
3824     int semume;
3825     int semusz;
3826     int semvmx;
3827     int semaem;
3828 };
3829 
3830 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3831                                               struct seminfo *host_seminfo)
3832 {
3833     struct target_seminfo *target_seminfo;
3834     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3835         return -TARGET_EFAULT;
3836     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3837     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3838     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3839     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3840     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3841     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3842     __put_user(host_seminfo->semume, &target_seminfo->semume);
3843     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3844     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3845     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3846     unlock_user_struct(target_seminfo, target_addr, 1);
3847     return 0;
3848 }
3849 
3850 union semun {
3851     int val;
3852     struct semid_ds *buf;
3853     unsigned short *array;
3854     struct seminfo *__buf;
3855 };
3856
3857 union target_semun {
3858     int val;
3859     abi_ulong buf;
3860     abi_ulong array;
3861     abi_ulong __buf;
3862 };
3863 
3864 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3865                                                abi_ulong target_addr)
3866 {
3867     int nsems;
3868     unsigned short *array;
3869     union semun semun;
3870     struct semid_ds semid_ds;
3871     int i, ret;
3872 
3873     semun.buf = &semid_ds;
3874 
3875     ret = semctl(semid, 0, IPC_STAT, semun);
3876     if (ret == -1)
3877         return get_errno(ret);
3878 
3879     nsems = semid_ds.sem_nsems;
3880 
3881     *host_array = g_try_new(unsigned short, nsems);
3882     if (!*host_array) {
3883         return -TARGET_ENOMEM;
3884     }
3885     array = lock_user(VERIFY_READ, target_addr,
3886                       nsems*sizeof(unsigned short), 1);
3887     if (!array) {
3888         g_free(*host_array);
3889         return -TARGET_EFAULT;
3890     }
3891 
3892     for(i=0; i<nsems; i++) {
3893         __get_user((*host_array)[i], &array[i]);
3894     }
3895     unlock_user(array, target_addr, 0);
3896 
3897     return 0;
3898 }
3899 
3900 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3901                                                unsigned short **host_array)
3902 {
3903     int nsems;
3904     unsigned short *array;
3905     union semun semun;
3906     struct semid_ds semid_ds;
3907     int i, ret;
3908 
3909     semun.buf = &semid_ds;
3910 
3911     ret = semctl(semid, 0, IPC_STAT, semun);
3912     if (ret == -1)
3913         return get_errno(ret);
3914 
3915     nsems = semid_ds.sem_nsems;
3916 
3917     array = lock_user(VERIFY_WRITE, target_addr,
3918                       nsems*sizeof(unsigned short), 0);
3919     if (!array)
3920         return -TARGET_EFAULT;
3921 
3922     for(i=0; i<nsems; i++) {
3923         __put_user((*host_array)[i], &array[i]);
3924     }
3925     g_free(*host_array);
3926     unlock_user(array, target_addr, 1);
3927 
3928     return 0;
3929 }
3930 
3931 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3932                                  abi_ulong target_arg)
3933 {
3934     union target_semun target_su = { .buf = target_arg };
3935     union semun arg;
3936     struct semid_ds dsarg;
3937     unsigned short *array = NULL;
3938     struct seminfo seminfo;
3939     abi_long ret = -TARGET_EINVAL;
3940     abi_long err;
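    /*
     * The low byte selects the command; masking also strips the IPC_64 flag
     * that guests may OR in to request the 64-bit layout of the ipc
     * structures.
     */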
3941     cmd &= 0xff;
3942 
3943     switch( cmd ) {
3944 	case GETVAL:
3945 	case SETVAL:
3946             /* In 64 bit cross-endian situations, we will erroneously pick up
3947              * the wrong half of the union for the "val" element.  To rectify
3948              * this, the entire 8-byte structure is byteswapped, followed by
3949              * a swap of the 4 byte val field. In other cases, the data is
3950              * already in proper host byte order. */
3951 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3952 		target_su.buf = tswapal(target_su.buf);
3953 		arg.val = tswap32(target_su.val);
3954 	    } else {
3955 		arg.val = target_su.val;
3956 	    }
3957             ret = get_errno(semctl(semid, semnum, cmd, arg));
3958             break;
3959 	case GETALL:
3960 	case SETALL:
3961             err = target_to_host_semarray(semid, &array, target_su.array);
3962             if (err)
3963                 return err;
3964             arg.array = array;
3965             ret = get_errno(semctl(semid, semnum, cmd, arg));
3966             err = host_to_target_semarray(semid, target_su.array, &array);
3967             if (err)
3968                 return err;
3969             break;
3970 	case IPC_STAT:
3971 	case IPC_SET:
3972 	case SEM_STAT:
3973             err = target_to_host_semid_ds(&dsarg, target_su.buf);
3974             if (err)
3975                 return err;
3976             arg.buf = &dsarg;
3977             ret = get_errno(semctl(semid, semnum, cmd, arg));
3978             err = host_to_target_semid_ds(target_su.buf, &dsarg);
3979             if (err)
3980                 return err;
3981             break;
3982 	case IPC_INFO:
3983 	case SEM_INFO:
3984             arg.__buf = &seminfo;
3985             ret = get_errno(semctl(semid, semnum, cmd, arg));
3986             err = host_to_target_seminfo(target_su.__buf, &seminfo);
3987             if (err)
3988                 return err;
3989             break;
3990 	case IPC_RMID:
3991 	case GETPID:
3992 	case GETNCNT:
3993 	case GETZCNT:
3994             ret = get_errno(semctl(semid, semnum, cmd, NULL));
3995             break;
3996     }
3997 
3998     return ret;
3999 }
4000 
4001 struct target_sembuf {
4002     unsigned short sem_num;
4003     short sem_op;
4004     short sem_flg;
4005 };
4006 
4007 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4008                                              abi_ulong target_addr,
4009                                              unsigned nsops)
4010 {
4011     struct target_sembuf *target_sembuf;
4012     int i;
4013 
4014     target_sembuf = lock_user(VERIFY_READ, target_addr,
4015                               nsops*sizeof(struct target_sembuf), 1);
4016     if (!target_sembuf)
4017         return -TARGET_EFAULT;
4018 
4019     for(i=0; i<nsops; i++) {
4020         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4021         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4022         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4023     }
4024 
4025     unlock_user(target_sembuf, target_addr, 0);
4026 
4027     return 0;
4028 }
4029 
4030 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4031     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4032 
4033 /*
4034  * This macro is required to handle the s390 variants, which pass the
4035  * arguments in a different order than the default.
4036  */
4037 #ifdef __s390x__
4038 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4039   (__nsops), (__timeout), (__sops)
4040 #else
4041 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4042   (__nsops), 0, (__sops), (__timeout)
4043 #endif
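
/*
 * For example, SEMTIMEDOP_IPC_ARGS(nsops, sops, ts) expands to
 * "(nsops), (ts), (sops)" on s390x but to "(nsops), 0, (sops), (ts)"
 * everywhere else, matching the argument order each kernel's ipc(2)
 * semtimedop multiplexer expects.
 */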
4044 
4045 static inline abi_long do_semtimedop(int semid,
4046                                      abi_long ptr,
4047                                      unsigned nsops,
4048                                      abi_long timeout, bool time64)
4049 {
4050     struct sembuf *sops;
4051     struct timespec ts, *pts = NULL;
4052     abi_long ret;
4053 
4054     if (timeout) {
4055         pts = &ts;
4056         if (time64) {
4057             if (target_to_host_timespec64(pts, timeout)) {
4058                 return -TARGET_EFAULT;
4059             }
4060         } else {
4061             if (target_to_host_timespec(pts, timeout)) {
4062                 return -TARGET_EFAULT;
4063             }
4064         }
4065     }
4066 
4067     if (nsops > TARGET_SEMOPM) {
4068         return -TARGET_E2BIG;
4069     }
4070 
4071     sops = g_new(struct sembuf, nsops);
4072 
4073     if (target_to_host_sembuf(sops, ptr, nsops)) {
4074         g_free(sops);
4075         return -TARGET_EFAULT;
4076     }
4077 
4078     ret = -TARGET_ENOSYS;
4079 #ifdef __NR_semtimedop
4080     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4081 #endif
4082 #ifdef __NR_ipc
4083     if (ret == -TARGET_ENOSYS) {
4084         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4085                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4086     }
4087 #endif
4088     g_free(sops);
4089     return ret;
4090 }
4091 #endif
4092 
4093 struct target_msqid_ds
4094 {
4095     struct target_ipc_perm msg_perm;
4096     abi_ulong msg_stime;
4097 #if TARGET_ABI_BITS == 32
4098     abi_ulong __unused1;
4099 #endif
4100     abi_ulong msg_rtime;
4101 #if TARGET_ABI_BITS == 32
4102     abi_ulong __unused2;
4103 #endif
4104     abi_ulong msg_ctime;
4105 #if TARGET_ABI_BITS == 32
4106     abi_ulong __unused3;
4107 #endif
4108     abi_ulong __msg_cbytes;
4109     abi_ulong msg_qnum;
4110     abi_ulong msg_qbytes;
4111     abi_ulong msg_lspid;
4112     abi_ulong msg_lrpid;
4113     abi_ulong __unused4;
4114     abi_ulong __unused5;
4115 };
4116 
4117 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4118                                                abi_ulong target_addr)
4119 {
4120     struct target_msqid_ds *target_md;
4121 
4122     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4123         return -TARGET_EFAULT;
4124     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4125         return -TARGET_EFAULT;
4126     host_md->msg_stime = tswapal(target_md->msg_stime);
4127     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4128     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4129     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4130     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4131     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4132     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4133     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4134     unlock_user_struct(target_md, target_addr, 0);
4135     return 0;
4136 }
4137 
4138 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4139                                                struct msqid_ds *host_md)
4140 {
4141     struct target_msqid_ds *target_md;
4142 
4143     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4144         return -TARGET_EFAULT;
4145     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4146         return -TARGET_EFAULT;
4147     target_md->msg_stime = tswapal(host_md->msg_stime);
4148     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4149     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4150     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4151     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4152     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4153     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4154     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4155     unlock_user_struct(target_md, target_addr, 1);
4156     return 0;
4157 }
4158 
4159 struct target_msginfo {
4160     int msgpool;
4161     int msgmap;
4162     int msgmax;
4163     int msgmnb;
4164     int msgmni;
4165     int msgssz;
4166     int msgtql;
4167     unsigned short int msgseg;
4168 };
4169 
4170 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4171                                               struct msginfo *host_msginfo)
4172 {
4173     struct target_msginfo *target_msginfo;
4174     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4175         return -TARGET_EFAULT;
4176     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4177     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4178     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4179     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4180     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4181     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4182     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4183     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4184     unlock_user_struct(target_msginfo, target_addr, 1);
4185     return 0;
4186 }
4187 
4188 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4189 {
4190     struct msqid_ds dsarg;
4191     struct msginfo msginfo;
4192     abi_long ret = -TARGET_EINVAL;
4193 
4194     cmd &= 0xff;
4195 
4196     switch (cmd) {
4197     case IPC_STAT:
4198     case IPC_SET:
4199     case MSG_STAT:
4200         if (target_to_host_msqid_ds(&dsarg,ptr))
4201             return -TARGET_EFAULT;
4202         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4203         if (host_to_target_msqid_ds(ptr,&dsarg))
4204             return -TARGET_EFAULT;
4205         break;
4206     case IPC_RMID:
4207         ret = get_errno(msgctl(msgid, cmd, NULL));
4208         break;
4209     case IPC_INFO:
4210     case MSG_INFO:
4211         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4212         if (host_to_target_msginfo(ptr, &msginfo))
4213             return -TARGET_EFAULT;
4214         break;
4215     }
4216 
4217     return ret;
4218 }
4219 
4220 struct target_msgbuf {
4221     abi_long mtype;
4222     char	mtext[1];
4223 };
4224 
4225 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4226                                  ssize_t msgsz, int msgflg)
4227 {
4228     struct target_msgbuf *target_mb;
4229     struct msgbuf *host_mb;
4230     abi_long ret = 0;
4231 
4232     if (msgsz < 0) {
4233         return -TARGET_EINVAL;
4234     }
4235 
4236     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4237         return -TARGET_EFAULT;
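    /*
     * struct msgbuf is a long mtype followed by the message text, hence the
     * extra sizeof(long) on top of msgsz for the host-side copy.
     */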
4238     host_mb = g_try_malloc(msgsz + sizeof(long));
4239     if (!host_mb) {
4240         unlock_user_struct(target_mb, msgp, 0);
4241         return -TARGET_ENOMEM;
4242     }
4243     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4244     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4245     ret = -TARGET_ENOSYS;
4246 #ifdef __NR_msgsnd
4247     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4248 #endif
4249 #ifdef __NR_ipc
4250     if (ret == -TARGET_ENOSYS) {
4251 #ifdef __s390x__
4252         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4253                                  host_mb));
4254 #else
4255         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4256                                  host_mb, 0));
4257 #endif
4258     }
4259 #endif
4260     g_free(host_mb);
4261     unlock_user_struct(target_mb, msgp, 0);
4262 
4263     return ret;
4264 }
4265 
4266 #ifdef __NR_ipc
4267 #if defined(__sparc__)
4268 /* On SPARC, msgrcv does not use the kludge on the final two arguments.  */
4269 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4270 #elif defined(__s390x__)
4271 /* The s390 sys_ipc variant has only five parameters.  */
4272 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4273     ((long int[]){(long int)__msgp, __msgtyp})
4274 #else
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4276     ((long int[]){(long int)__msgp, __msgtyp}), 0
4277 #endif
4278 #endif
4279 
4280 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4281                                  ssize_t msgsz, abi_long msgtyp,
4282                                  int msgflg)
4283 {
4284     struct target_msgbuf *target_mb;
4285     char *target_mtext;
4286     struct msgbuf *host_mb;
4287     abi_long ret = 0;
4288 
4289     if (msgsz < 0) {
4290         return -TARGET_EINVAL;
4291     }
4292 
4293     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4294         return -TARGET_EFAULT;
4295 
4296     host_mb = g_try_malloc(msgsz + sizeof(long));
4297     if (!host_mb) {
4298         ret = -TARGET_ENOMEM;
4299         goto end;
4300     }
4301     ret = -TARGET_ENOSYS;
4302 #ifdef __NR_msgrcv
4303     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4304 #endif
4305 #ifdef __NR_ipc
4306     if (ret == -TARGET_ENOSYS) {
4307         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4308                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4309     }
4310 #endif
4311 
4312     if (ret > 0) {
4313         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4314         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4315         if (!target_mtext) {
4316             ret = -TARGET_EFAULT;
4317             goto end;
4318         }
4319         memcpy(target_mb->mtext, host_mb->mtext, ret);
4320         unlock_user(target_mtext, target_mtext_addr, ret);
4321     }
4322 
4323     target_mb->mtype = tswapal(host_mb->mtype);
4324 
4325 end:
4326     if (target_mb)
4327         unlock_user_struct(target_mb, msgp, 1);
4328     g_free(host_mb);
4329     return ret;
4330 }
4331 
4332 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4333                                                abi_ulong target_addr)
4334 {
4335     struct target_shmid_ds *target_sd;
4336 
4337     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4338         return -TARGET_EFAULT;
4339     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4340         return -TARGET_EFAULT;
4341     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4342     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4343     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4344     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4345     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4346     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4347     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4348     unlock_user_struct(target_sd, target_addr, 0);
4349     return 0;
4350 }
4351 
4352 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4353                                                struct shmid_ds *host_sd)
4354 {
4355     struct target_shmid_ds *target_sd;
4356 
4357     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4358         return -TARGET_EFAULT;
4359     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4360         return -TARGET_EFAULT;
4361     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4362     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4363     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4364     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4365     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4366     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4367     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4368     unlock_user_struct(target_sd, target_addr, 1);
4369     return 0;
4370 }
4371 
4372 struct  target_shminfo {
4373     abi_ulong shmmax;
4374     abi_ulong shmmin;
4375     abi_ulong shmmni;
4376     abi_ulong shmseg;
4377     abi_ulong shmall;
4378 };
4379 
4380 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4381                                               struct shminfo *host_shminfo)
4382 {
4383     struct target_shminfo *target_shminfo;
4384     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4387     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4388     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4389     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4390     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4391     unlock_user_struct(target_shminfo, target_addr, 1);
4392     return 0;
4393 }
4394 
4395 struct target_shm_info {
4396     int used_ids;
4397     abi_ulong shm_tot;
4398     abi_ulong shm_rss;
4399     abi_ulong shm_swp;
4400     abi_ulong swap_attempts;
4401     abi_ulong swap_successes;
4402 };
4403 
4404 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4405                                                struct shm_info *host_shm_info)
4406 {
4407     struct target_shm_info *target_shm_info;
4408     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4409         return -TARGET_EFAULT;
4410     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4411     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4412     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4413     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4414     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4415     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4416     unlock_user_struct(target_shm_info, target_addr, 1);
4417     return 0;
4418 }
4419 
4420 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4421 {
4422     struct shmid_ds dsarg;
4423     struct shminfo shminfo;
4424     struct shm_info shm_info;
4425     abi_long ret = -TARGET_EINVAL;
4426 
4427     cmd &= 0xff;
4428 
4429     switch(cmd) {
4430     case IPC_STAT:
4431     case IPC_SET:
4432     case SHM_STAT:
4433         if (target_to_host_shmid_ds(&dsarg, buf))
4434             return -TARGET_EFAULT;
4435         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4436         if (host_to_target_shmid_ds(buf, &dsarg))
4437             return -TARGET_EFAULT;
4438         break;
4439     case IPC_INFO:
4440         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4441         if (host_to_target_shminfo(buf, &shminfo))
4442             return -TARGET_EFAULT;
4443         break;
4444     case SHM_INFO:
4445         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4446         if (host_to_target_shm_info(buf, &shm_info))
4447             return -TARGET_EFAULT;
4448         break;
4449     case IPC_RMID:
4450     case SHM_LOCK:
4451     case SHM_UNLOCK:
4452         ret = get_errno(shmctl(shmid, cmd, NULL));
4453         break;
4454     }
4455 
4456     return ret;
4457 }
4458 
4459 #ifndef TARGET_FORCE_SHMLBA
4460 /* For most architectures, SHMLBA is the same as the page size;
4461  * some architectures have larger values, in which case they should
4462  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4463  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4464  * and defining its own value for SHMLBA.
4465  *
4466  * The kernel also permits SHMLBA to be set by the architecture to a
4467  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4468  * this means that addresses are rounded to the large size if
4469  * SHM_RND is set but addresses not aligned to that size are not rejected
4470  * as long as they are at least page-aligned. Since the only architecture
4471  * which uses this is ia64 this code doesn't provide for that oddity.
4472  */
4473 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4474 {
4475     return TARGET_PAGE_SIZE;
4476 }
4477 #endif
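
/*
 * Illustrative sketch (hypothetical, not from the original source): an
 * architecture whose SHMLBA were, say, four pages would instead define
 * TARGET_FORCE_SHMLBA in its target headers and provide something like:
 *
 *     static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
 *     {
 *         return 4 * TARGET_PAGE_SIZE;
 *     }
 */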
4478 
4479 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4480                                  int shmid, abi_ulong shmaddr, int shmflg)
4481 {
4482     CPUState *cpu = env_cpu(cpu_env);
4483     abi_long raddr;
4484     void *host_raddr;
4485     struct shmid_ds shm_info;
4486     int i,ret;
4487     abi_ulong shmlba;
4488 
4489     /* shmat pointers are always untagged */
4490 
4491     /* find out the length of the shared memory segment */
4492     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4493     if (is_error(ret)) {
4494         /* can't get length, bail out */
4495         return ret;
4496     }
4497 
4498     shmlba = target_shmlba(cpu_env);
4499 
4500     if (shmaddr & (shmlba - 1)) {
4501         if (shmflg & SHM_RND) {
4502             shmaddr &= ~(shmlba - 1);
4503         } else {
4504             return -TARGET_EINVAL;
4505         }
4506     }
4507     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4508         return -TARGET_EINVAL;
4509     }
4510 
4511     mmap_lock();
4512 
4513     /*
4514      * We're mapping shared memory, so ensure we generate code for parallel
4515      * execution and flush old translations.  This will work up to the level
4516      * supported by the host -- anything that requires EXCP_ATOMIC will not
4517      * be atomic with respect to an external process.
4518      */
4519     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4520         cpu->tcg_cflags |= CF_PARALLEL;
4521         tb_flush(cpu);
4522     }
4523 
4524     if (shmaddr)
4525         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4526     else {
4527         abi_ulong mmap_start;
4528 
4529         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4530         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4531 
4532         if (mmap_start == -1) {
4533             errno = ENOMEM;
4534             host_raddr = (void *)-1;
4535         } else
4536             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4537                                shmflg | SHM_REMAP);
4538     }
4539 
4540     if (host_raddr == (void *)-1) {
4541         mmap_unlock();
4542         return get_errno((long)host_raddr);
4543     }
4544     raddr=h2g((unsigned long)host_raddr);
4545 
4546     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4547                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4548                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4549 
4550     for (i = 0; i < N_SHM_REGIONS; i++) {
4551         if (!shm_regions[i].in_use) {
4552             shm_regions[i].in_use = true;
4553             shm_regions[i].start = raddr;
4554             shm_regions[i].size = shm_info.shm_segsz;
4555             break;
4556         }
4557     }
4558 
4559     mmap_unlock();
4560     return raddr;
4561 
4562 }
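
/*
 * Example of the alignment handling in do_shmat() above: with a 4 KiB
 * shmlba, a guest shmaddr of 0x20001234 is rejected with -TARGET_EINVAL
 * unless SHM_RND is set, in which case it is rounded down to 0x20001000
 * before the host shmat() is attempted.
 */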
4563 
4564 static inline abi_long do_shmdt(abi_ulong shmaddr)
4565 {
4566     int i;
4567     abi_long rv;
4568 
4569     /* shmdt pointers are always untagged */
4570 
4571     mmap_lock();
4572 
4573     for (i = 0; i < N_SHM_REGIONS; ++i) {
4574         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4575             shm_regions[i].in_use = false;
4576             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4577             break;
4578         }
4579     }
4580     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4581 
4582     mmap_unlock();
4583 
4584     return rv;
4585 }
4586 
4587 #ifdef TARGET_NR_ipc
4588 /* ??? This only works with linear mappings.  */
4589 /* do_ipc() must return target values and target errnos. */
4590 static abi_long do_ipc(CPUArchState *cpu_env,
4591                        unsigned int call, abi_long first,
4592                        abi_long second, abi_long third,
4593                        abi_long ptr, abi_long fifth)
4594 {
4595     int version;
4596     abi_long ret = 0;
4597 
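    /*
     * The guest's ipc(2) "call" argument packs the operation number in the
     * low 16 bits and a version in the high 16 bits; the version is used
     * below to distinguish old-style argument layouts for IPCOP_msgrcv and
     * IPCOP_shmat.
     */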
4598     version = call >> 16;
4599     call &= 0xffff;
4600 
4601     switch (call) {
4602     case IPCOP_semop:
4603         ret = do_semtimedop(first, ptr, second, 0, false);
4604         break;
4605     case IPCOP_semtimedop:
4606     /*
4607      * The s390 sys_ipc variant has only five parameters instead of six
4608      * (as in the default variant), and the only difference is the handling
4609      * of SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4610      * to a struct timespec while the generic variant uses the fifth parameter.
4611      */
4612 #if defined(TARGET_S390X)
4613         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4614 #else
4615         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4616 #endif
4617         break;
4618 
4619     case IPCOP_semget:
4620         ret = get_errno(semget(first, second, third));
4621         break;
4622 
4623     case IPCOP_semctl: {
4624         /* The semun argument to semctl is passed by value, so dereference the
4625          * ptr argument. */
4626         abi_ulong atptr;
4627         get_user_ual(atptr, ptr);
4628         ret = do_semctl(first, second, third, atptr);
4629         break;
4630     }
4631 
4632     case IPCOP_msgget:
4633         ret = get_errno(msgget(first, second));
4634         break;
4635 
4636     case IPCOP_msgsnd:
4637         ret = do_msgsnd(first, ptr, second, third);
4638         break;
4639 
4640     case IPCOP_msgctl:
4641         ret = do_msgctl(first, second, ptr);
4642         break;
4643 
4644     case IPCOP_msgrcv:
4645         switch (version) {
4646         case 0:
4647             {
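                     /*
                      * Old-style (version 0) msgrcv passes the message buffer
                      * pointer and the message type packed into a small
                      * structure rather than as separate arguments.
                      */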
4648                 struct target_ipc_kludge {
4649                     abi_long msgp;
4650                     abi_long msgtyp;
4651                 } *tmp;
4652 
4653                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4654                     ret = -TARGET_EFAULT;
4655                     break;
4656                 }
4657 
4658                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4659 
4660                 unlock_user_struct(tmp, ptr, 0);
4661                 break;
4662             }
4663         default:
4664             ret = do_msgrcv(first, ptr, second, fifth, third);
4665         }
4666         break;
4667 
4668     case IPCOP_shmat:
4669         switch (version) {
4670         default:
4671         {
4672             abi_ulong raddr;
4673             raddr = do_shmat(cpu_env, first, ptr, second);
4674             if (is_error(raddr))
4675                 return get_errno(raddr);
4676             if (put_user_ual(raddr, third))
4677                 return -TARGET_EFAULT;
4678             break;
4679         }
4680         case 1:
4681             ret = -TARGET_EINVAL;
4682             break;
4683         }
4684         break;
4685     case IPCOP_shmdt:
4686         ret = do_shmdt(ptr);
4687         break;
4688 
4689     case IPCOP_shmget:
4690         /* IPC_* flag values are the same on all Linux platforms */
4691         ret = get_errno(shmget(first, second, third));
4692         break;
4693 
4694     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4695     case IPCOP_shmctl:
4696         ret = do_shmctl(first, second, ptr);
4697         break;
4698     default:
4699         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4700                       call, version);
4701         ret = -TARGET_ENOSYS;
4702         break;
4703     }
4704     return ret;
4705 }
4706 #endif
4707 
4708 /* kernel structure types definitions */
4709 
4710 #define STRUCT(name, ...) STRUCT_ ## name,
4711 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4712 enum {
4713 #include "syscall_types.h"
4714 STRUCT_MAX
4715 };
4716 #undef STRUCT
4717 #undef STRUCT_SPECIAL
4718 
4719 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4720 #define STRUCT_SPECIAL(name)
4721 #include "syscall_types.h"
4722 #undef STRUCT
4723 #undef STRUCT_SPECIAL
4724 
4725 #define MAX_STRUCT_SIZE 4096
4726 
4727 #ifdef CONFIG_FIEMAP
4728 /* So fiemap access checks don't overflow on 32 bit systems.
4729  * This is very slightly smaller than the limit imposed by
4730  * the underlying kernel.
4731  */
4732 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4733                             / sizeof(struct fiemap_extent))
4734 
4735 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4736                                        int fd, int cmd, abi_long arg)
4737 {
4738     /* The parameter for this ioctl is a struct fiemap followed
4739      * by an array of struct fiemap_extent whose size is set
4740      * in fiemap->fm_extent_count. The array is filled in by the
4741      * ioctl.
4742      */
4743     int target_size_in, target_size_out;
4744     struct fiemap *fm;
4745     const argtype *arg_type = ie->arg_type;
4746     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4747     void *argptr, *p;
4748     abi_long ret;
4749     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4750     uint32_t outbufsz;
4751     int free_fm = 0;
4752 
4753     assert(arg_type[0] == TYPE_PTR);
4754     assert(ie->access == IOC_RW);
4755     arg_type++;
4756     target_size_in = thunk_type_size(arg_type, 0);
4757     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4758     if (!argptr) {
4759         return -TARGET_EFAULT;
4760     }
4761     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4762     unlock_user(argptr, arg, 0);
4763     fm = (struct fiemap *)buf_temp;
4764     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4765         return -TARGET_EINVAL;
4766     }
4767 
4768     outbufsz = sizeof(*fm) +
4769         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4770 
4771     if (outbufsz > MAX_STRUCT_SIZE) {
4772         /* We can't fit all the extents into the fixed size buffer.
4773          * Allocate one that is large enough and use it instead.
4774          */
4775         fm = g_try_malloc(outbufsz);
4776         if (!fm) {
4777             return -TARGET_ENOMEM;
4778         }
4779         memcpy(fm, buf_temp, sizeof(struct fiemap));
4780         free_fm = 1;
4781     }
4782     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4783     if (!is_error(ret)) {
4784         target_size_out = target_size_in;
4785         /* An extent_count of 0 means we were only counting the extents
4786          * so there are no structs to copy
4787          */
4788         if (fm->fm_extent_count != 0) {
4789             target_size_out += fm->fm_mapped_extents * extent_size;
4790         }
4791         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4792         if (!argptr) {
4793             ret = -TARGET_EFAULT;
4794         } else {
4795             /* Convert the struct fiemap */
4796             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4797             if (fm->fm_extent_count != 0) {
4798                 p = argptr + target_size_in;
4799                 /* ...and then all the struct fiemap_extents */
4800                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4801                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4802                                   THUNK_TARGET);
4803                     p += extent_size;
4804                 }
4805             }
4806             unlock_user(argptr, arg, target_size_out);
4807         }
4808     }
4809     if (free_fm) {
4810         g_free(fm);
4811     }
4812     return ret;
4813 }
4814 #endif
4815 
4816 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4817                                 int fd, int cmd, abi_long arg)
4818 {
4819     const argtype *arg_type = ie->arg_type;
4820     int target_size;
4821     void *argptr;
4822     int ret;
4823     struct ifconf *host_ifconf;
4824     uint32_t outbufsz;
4825     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4826     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4827     int target_ifreq_size;
4828     int nb_ifreq;
4829     int free_buf = 0;
4830     int i;
4831     int target_ifc_len;
4832     abi_long target_ifc_buf;
4833     int host_ifc_len;
4834     char *host_ifc_buf;
4835 
4836     assert(arg_type[0] == TYPE_PTR);
4837     assert(ie->access == IOC_RW);
4838 
4839     arg_type++;
4840     target_size = thunk_type_size(arg_type, 0);
4841 
4842     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4843     if (!argptr)
4844         return -TARGET_EFAULT;
4845     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4846     unlock_user(argptr, arg, 0);
4847 
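         /*
          * buf_temp now holds the converted ifconf; its ifc_buf field still
          * contains the guest pointer, so remember that before it is replaced
          * with a host-side buffer below.
          */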
4848     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4849     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4850     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4851 
4852     if (target_ifc_buf != 0) {
4853         target_ifc_len = host_ifconf->ifc_len;
4854         nb_ifreq = target_ifc_len / target_ifreq_size;
4855         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4856 
4857         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4858         if (outbufsz > MAX_STRUCT_SIZE) {
4859             /*
4860              * We can't fit all the ifreq entries into the fixed size buffer.
4861              * Allocate one that is large enough and use it instead.
4862              */
4863             host_ifconf = g_try_malloc(outbufsz);
4864             if (!host_ifconf) {
4865                 return -TARGET_ENOMEM;
4866             }
4867             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4868             free_buf = 1;
4869         }
4870         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4871 
4872         host_ifconf->ifc_len = host_ifc_len;
4873     } else {
4874         host_ifc_buf = NULL;
4875     }
4876     host_ifconf->ifc_buf = host_ifc_buf;
4877 
4878     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4879     if (!is_error(ret)) {
4880         /* convert host ifc_len to target ifc_len */
4881 
4882         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4883         target_ifc_len = nb_ifreq * target_ifreq_size;
4884         host_ifconf->ifc_len = target_ifc_len;
4885 
4886         /* restore target ifc_buf */
4887 
4888         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4889 
4890         /* copy struct ifconf to target user */
4891 
4892         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4893         if (!argptr)
4894             return -TARGET_EFAULT;
4895         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4896         unlock_user(argptr, arg, target_size);
4897 
4898         if (target_ifc_buf != 0) {
4899             /* copy ifreq[] to target user */
4900             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4901             for (i = 0; i < nb_ifreq ; i++) {
4902                 thunk_convert(argptr + i * target_ifreq_size,
4903                               host_ifc_buf + i * sizeof(struct ifreq),
4904                               ifreq_arg_type, THUNK_TARGET);
4905             }
4906             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4907         }
4908     }
4909 
4910     if (free_buf) {
4911         g_free(host_ifconf);
4912     }
4913 
4914     return ret;
4915 }
4916 
4917 #if defined(CONFIG_USBFS)
4918 #if HOST_LONG_BITS > 64
4919 #error USBDEVFS thunks do not support >64 bit hosts yet.
4920 #endif
4921 struct live_urb {
4922     uint64_t target_urb_adr;
4923     uint64_t target_buf_adr;
4924     char *target_buf_ptr;
4925     struct usbdevfs_urb host_urb;
4926 };
4927 
4928 static GHashTable *usbdevfs_urb_hashtable(void)
4929 {
4930     static GHashTable *urb_hashtable;
4931 
4932     if (!urb_hashtable) {
4933         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4934     }
4935     return urb_hashtable;
4936 }
4937 
4938 static void urb_hashtable_insert(struct live_urb *urb)
4939 {
4940     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4941     g_hash_table_insert(urb_hashtable, urb, urb);
4942 }
4943 
4944 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4945 {
4946     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4947     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4948 }
4949 
4950 static void urb_hashtable_remove(struct live_urb *urb)
4951 {
4952     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4953     g_hash_table_remove(urb_hashtable, urb);
4954 }
4955 
4956 static abi_long
4957 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4958                           int fd, int cmd, abi_long arg)
4959 {
4960     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4961     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4962     struct live_urb *lurb;
4963     void *argptr;
4964     uint64_t hurb;
4965     int target_size;
4966     uintptr_t target_urb_adr;
4967     abi_long ret;
4968 
4969     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4970 
4971     memset(buf_temp, 0, sizeof(uint64_t));
4972     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4973     if (is_error(ret)) {
4974         return ret;
4975     }
4976 
4977     memcpy(&hurb, buf_temp, sizeof(uint64_t));
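         /*
          * The kernel returns the address of the embedded host_urb; step back
          * by its offset to recover the enclosing live_urb and its metadata.
          */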
4978     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4979     if (!lurb->target_urb_adr) {
4980         return -TARGET_EFAULT;
4981     }
4982     urb_hashtable_remove(lurb);
4983     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4984         lurb->host_urb.buffer_length);
4985     lurb->target_buf_ptr = NULL;
4986 
4987     /* restore the guest buffer pointer */
4988     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4989 
4990     /* update the guest urb struct */
4991     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4992     if (!argptr) {
4993         g_free(lurb);
4994         return -TARGET_EFAULT;
4995     }
4996     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4997     unlock_user(argptr, lurb->target_urb_adr, target_size);
4998 
4999     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5000     /* write back the urb handle */
5001     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5002     if (!argptr) {
5003         g_free(lurb);
5004         return -TARGET_EFAULT;
5005     }
5006 
5007     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5008     target_urb_adr = lurb->target_urb_adr;
5009     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5010     unlock_user(argptr, arg, target_size);
5011 
5012     g_free(lurb);
5013     return ret;
5014 }
5015 
5016 static abi_long
5017 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5018                              uint8_t *buf_temp __attribute__((unused)),
5019                              int fd, int cmd, abi_long arg)
5020 {
5021     struct live_urb *lurb;
5022 
5023     /* map target address back to host URB with metadata. */
5024     lurb = urb_hashtable_lookup(arg);
5025     if (!lurb) {
5026         return -TARGET_EFAULT;
5027     }
5028     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5029 }
5030 
5031 static abi_long
5032 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5033                             int fd, int cmd, abi_long arg)
5034 {
5035     const argtype *arg_type = ie->arg_type;
5036     int target_size;
5037     abi_long ret;
5038     void *argptr;
5039     int rw_dir;
5040     struct live_urb *lurb;
5041 
5042     /*
5043      * each submitted URB needs to map to a unique ID for the
5044      * kernel, and that unique ID needs to be a pointer to
5045      * host memory.  Hence, we need to malloc for each URB.
5046      * isochronous transfers have a variable length struct.
5047      */
5048     arg_type++;
5049     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5050 
5051     /* construct host copy of urb and metadata */
5052     lurb = g_try_malloc0(sizeof(struct live_urb));
5053     if (!lurb) {
5054         return -TARGET_ENOMEM;
5055     }
5056 
5057     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5058     if (!argptr) {
5059         g_free(lurb);
5060         return -TARGET_EFAULT;
5061     }
5062     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5063     unlock_user(argptr, arg, 0);
5064 
5065     lurb->target_urb_adr = arg;
5066     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5067 
5068     /* buffer space used depends on endpoint type so lock the entire buffer */
5069     /* control type urbs should check the buffer contents for true direction */
5070     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5071     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5072         lurb->host_urb.buffer_length, 1);
5073     if (lurb->target_buf_ptr == NULL) {
5074         g_free(lurb);
5075         return -TARGET_EFAULT;
5076     }
5077 
5078     /* update buffer pointer in host copy */
5079     lurb->host_urb.buffer = lurb->target_buf_ptr;
5080 
5081     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5082     if (is_error(ret)) {
5083         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5084         g_free(lurb);
5085     } else {
5086         urb_hashtable_insert(lurb);
5087     }
5088 
5089     return ret;
5090 }
5091 #endif /* CONFIG_USBFS */
5092 
5093 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5094                             int cmd, abi_long arg)
5095 {
5096     void *argptr;
5097     struct dm_ioctl *host_dm;
5098     abi_long guest_data;
5099     uint32_t guest_data_size;
5100     int target_size;
5101     const argtype *arg_type = ie->arg_type;
5102     abi_long ret;
5103     void *big_buf = NULL;
5104     char *host_data;
5105 
5106     arg_type++;
5107     target_size = thunk_type_size(arg_type, 0);
5108     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5109     if (!argptr) {
5110         ret = -TARGET_EFAULT;
5111         goto out;
5112     }
5113     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5114     unlock_user(argptr, arg, 0);
5115 
5116     /* buf_temp is too small, so fetch things into a bigger buffer */
5117     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5118     memcpy(big_buf, buf_temp, target_size);
5119     buf_temp = big_buf;
5120     host_dm = big_buf;
5121 
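         /*
          * The dm_ioctl payload follows the header: data_start is its offset
          * within both the guest argument and our host copy.
          */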
5122     guest_data = arg + host_dm->data_start;
5123     if ((guest_data - arg) < 0) {
5124         ret = -TARGET_EINVAL;
5125         goto out;
5126     }
5127     guest_data_size = host_dm->data_size - host_dm->data_start;
5128     host_data = (char*)host_dm + host_dm->data_start;
5129 
5130     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5131     if (!argptr) {
5132         ret = -TARGET_EFAULT;
5133         goto out;
5134     }
5135 
5136     switch (ie->host_cmd) {
5137     case DM_REMOVE_ALL:
5138     case DM_LIST_DEVICES:
5139     case DM_DEV_CREATE:
5140     case DM_DEV_REMOVE:
5141     case DM_DEV_SUSPEND:
5142     case DM_DEV_STATUS:
5143     case DM_DEV_WAIT:
5144     case DM_TABLE_STATUS:
5145     case DM_TABLE_CLEAR:
5146     case DM_TABLE_DEPS:
5147     case DM_LIST_VERSIONS:
5148         /* no input data */
5149         break;
5150     case DM_DEV_RENAME:
5151     case DM_DEV_SET_GEOMETRY:
5152         /* data contains only strings */
5153         memcpy(host_data, argptr, guest_data_size);
5154         break;
5155     case DM_TARGET_MSG:
5156         memcpy(host_data, argptr, guest_data_size);
5157         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5158         break;
5159     case DM_TABLE_LOAD:
5160     {
5161         void *gspec = argptr;
5162         void *cur_data = host_data;
5163         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5164         int spec_size = thunk_type_size(arg_type, 0);
5165         int i;
5166 
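             /*
              * Repack each dm_target_spec for the host: convert the fixed
              * part, copy the trailing parameter string and recompute 'next'
              * to reflect the host layout.
              */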
5167         for (i = 0; i < host_dm->target_count; i++) {
5168             struct dm_target_spec *spec = cur_data;
5169             uint32_t next;
5170             int slen;
5171 
5172             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5173             slen = strlen((char*)gspec + spec_size) + 1;
5174             next = spec->next;
5175             spec->next = sizeof(*spec) + slen;
5176             strcpy((char*)&spec[1], gspec + spec_size);
5177             gspec += next;
5178             cur_data += spec->next;
5179         }
5180         break;
5181     }
5182     default:
5183         ret = -TARGET_EINVAL;
5184         unlock_user(argptr, guest_data, 0);
5185         goto out;
5186     }
5187     unlock_user(argptr, guest_data, 0);
5188 
5189     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5190     if (!is_error(ret)) {
5191         guest_data = arg + host_dm->data_start;
5192         guest_data_size = host_dm->data_size - host_dm->data_start;
5193         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5194         switch (ie->host_cmd) {
5195         case DM_REMOVE_ALL:
5196         case DM_DEV_CREATE:
5197         case DM_DEV_REMOVE:
5198         case DM_DEV_RENAME:
5199         case DM_DEV_SUSPEND:
5200         case DM_DEV_STATUS:
5201         case DM_TABLE_LOAD:
5202         case DM_TABLE_CLEAR:
5203         case DM_TARGET_MSG:
5204         case DM_DEV_SET_GEOMETRY:
5205             /* no return data */
5206             break;
5207         case DM_LIST_DEVICES:
5208         {
5209             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5210             uint32_t remaining_data = guest_data_size;
5211             void *cur_data = argptr;
5212             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5213             int nl_size = 12; /* can't use thunk_size due to alignment */
5214 
5215             while (1) {
5216                 uint32_t next = nl->next;
5217                 if (next) {
5218                     nl->next = nl_size + (strlen(nl->name) + 1);
5219                 }
5220                 if (remaining_data < nl->next) {
5221                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5222                     break;
5223                 }
5224                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5225                 strcpy(cur_data + nl_size, nl->name);
5226                 cur_data += nl->next;
5227                 remaining_data -= nl->next;
5228                 if (!next) {
5229                     break;
5230                 }
5231                 nl = (void*)nl + next;
5232             }
5233             break;
5234         }
5235         case DM_DEV_WAIT:
5236         case DM_TABLE_STATUS:
5237         {
5238             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5239             void *cur_data = argptr;
5240             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5241             int spec_size = thunk_type_size(arg_type, 0);
5242             int i;
5243 
5244             for (i = 0; i < host_dm->target_count; i++) {
5245                 uint32_t next = spec->next;
5246                 int slen = strlen((char*)&spec[1]) + 1;
5247                 spec->next = (cur_data - argptr) + spec_size + slen;
5248                 if (guest_data_size < spec->next) {
5249                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5250                     break;
5251                 }
5252                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5253                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5254                 cur_data = argptr + spec->next;
5255                 spec = (void*)host_dm + host_dm->data_start + next;
5256             }
5257             break;
5258         }
5259         case DM_TABLE_DEPS:
5260         {
5261             void *hdata = (void*)host_dm + host_dm->data_start;
5262             int count = *(uint32_t*)hdata;
5263             uint64_t *hdev = hdata + 8;
5264             uint64_t *gdev = argptr + 8;
5265             int i;
5266 
5267             *(uint32_t*)argptr = tswap32(count);
5268             for (i = 0; i < count; i++) {
5269                 *gdev = tswap64(*hdev);
5270                 gdev++;
5271                 hdev++;
5272             }
5273             break;
5274         }
5275         case DM_LIST_VERSIONS:
5276         {
5277             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5278             uint32_t remaining_data = guest_data_size;
5279             void *cur_data = argptr;
5280             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5281             int vers_size = thunk_type_size(arg_type, 0);
5282 
5283             while (1) {
5284                 uint32_t next = vers->next;
5285                 if (next) {
5286                     vers->next = vers_size + (strlen(vers->name) + 1);
5287                 }
5288                 if (remaining_data < vers->next) {
5289                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5290                     break;
5291                 }
5292                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5293                 strcpy(cur_data + vers_size, vers->name);
5294                 cur_data += vers->next;
5295                 remaining_data -= vers->next;
5296                 if (!next) {
5297                     break;
5298                 }
5299                 vers = (void*)vers + next;
5300             }
5301             break;
5302         }
5303         default:
5304             unlock_user(argptr, guest_data, 0);
5305             ret = -TARGET_EINVAL;
5306             goto out;
5307         }
5308         unlock_user(argptr, guest_data, guest_data_size);
5309 
5310         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5311         if (!argptr) {
5312             ret = -TARGET_EFAULT;
5313             goto out;
5314         }
5315         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5316         unlock_user(argptr, arg, target_size);
5317     }
5318 out:
5319     g_free(big_buf);
5320     return ret;
5321 }
5322 
5323 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5324                                int cmd, abi_long arg)
5325 {
5326     void *argptr;
5327     int target_size;
5328     const argtype *arg_type = ie->arg_type;
5329     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5330     abi_long ret;
5331 
5332     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5333     struct blkpg_partition host_part;
5334 
5335     /* Read and convert blkpg */
5336     arg_type++;
5337     target_size = thunk_type_size(arg_type, 0);
5338     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5339     if (!argptr) {
5340         ret = -TARGET_EFAULT;
5341         goto out;
5342     }
5343     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5344     unlock_user(argptr, arg, 0);
5345 
5346     switch (host_blkpg->op) {
5347     case BLKPG_ADD_PARTITION:
5348     case BLKPG_DEL_PARTITION:
5349         /* payload is struct blkpg_partition */
5350         break;
5351     default:
5352         /* Unknown opcode */
5353         ret = -TARGET_EINVAL;
5354         goto out;
5355     }
5356 
5357     /* Read and convert blkpg->data */
5358     arg = (abi_long)(uintptr_t)host_blkpg->data;
5359     target_size = thunk_type_size(part_arg_type, 0);
5360     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5361     if (!argptr) {
5362         ret = -TARGET_EFAULT;
5363         goto out;
5364     }
5365     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5366     unlock_user(argptr, arg, 0);
5367 
5368     /* Swizzle the data pointer to our local copy and call! */
5369     host_blkpg->data = &host_part;
5370     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5371 
5372 out:
5373     return ret;
5374 }
5375 
5376 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5377                                 int fd, int cmd, abi_long arg)
5378 {
5379     const argtype *arg_type = ie->arg_type;
5380     const StructEntry *se;
5381     const argtype *field_types;
5382     const int *dst_offsets, *src_offsets;
5383     int target_size;
5384     void *argptr;
5385     abi_ulong *target_rt_dev_ptr = NULL;
5386     unsigned long *host_rt_dev_ptr = NULL;
5387     abi_long ret;
5388     int i;
5389 
5390     assert(ie->access == IOC_W);
5391     assert(*arg_type == TYPE_PTR);
5392     arg_type++;
5393     assert(*arg_type == TYPE_STRUCT);
5394     target_size = thunk_type_size(arg_type, 0);
5395     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5396     if (!argptr) {
5397         return -TARGET_EFAULT;
5398     }
5399     arg_type++;
5400     assert(*arg_type == (int)STRUCT_rtentry);
5401     se = struct_entries + *arg_type++;
5402     assert(se->convert[0] == NULL);
5403     /* Convert the struct field by field so the rt_dev string pointer can be
5403        intercepted and handled separately. */
5404     field_types = se->field_types;
5405     dst_offsets = se->field_offsets[THUNK_HOST];
5406     src_offsets = se->field_offsets[THUNK_TARGET];
5407     for (i = 0; i < se->nb_fields; i++) {
5408         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5409             assert(*field_types == TYPE_PTRVOID);
5410             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5411             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5412             if (*target_rt_dev_ptr != 0) {
5413                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5414                                                   tswapal(*target_rt_dev_ptr));
5415                 if (!*host_rt_dev_ptr) {
5416                     unlock_user(argptr, arg, 0);
5417                     return -TARGET_EFAULT;
5418                 }
5419             } else {
5420                 *host_rt_dev_ptr = 0;
5421             }
5422             field_types++;
5423             continue;
5424         }
5425         field_types = thunk_convert(buf_temp + dst_offsets[i],
5426                                     argptr + src_offsets[i],
5427                                     field_types, THUNK_HOST);
5428     }
5429     unlock_user(argptr, arg, 0);
5430 
5431     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5432 
5433     assert(host_rt_dev_ptr != NULL);
5434     assert(target_rt_dev_ptr != NULL);
5435     if (*host_rt_dev_ptr != 0) {
5436         unlock_user((void *)*host_rt_dev_ptr,
5437                     *target_rt_dev_ptr, 0);
5438     }
5439     return ret;
5440 }
5441 
5442 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5443                                      int fd, int cmd, abi_long arg)
5444 {
5445     int sig = target_to_host_signal(arg);
5446     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5447 }
5448 
5449 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5450                                     int fd, int cmd, abi_long arg)
5451 {
5452     struct timeval tv;
5453     abi_long ret;
5454 
5455     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5456     if (is_error(ret)) {
5457         return ret;
5458     }
5459 
5460     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5461         if (copy_to_user_timeval(arg, &tv)) {
5462             return -TARGET_EFAULT;
5463         }
5464     } else {
5465         if (copy_to_user_timeval64(arg, &tv)) {
5466             return -TARGET_EFAULT;
5467         }
5468     }
5469 
5470     return ret;
5471 }
5472 
5473 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5474                                       int fd, int cmd, abi_long arg)
5475 {
5476     struct timespec ts;
5477     abi_long ret;
5478 
5479     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5480     if (is_error(ret)) {
5481         return ret;
5482     }
5483 
5484     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5485         if (host_to_target_timespec(arg, &ts)) {
5486             return -TARGET_EFAULT;
5487         }
5488     } else {
5489         if (host_to_target_timespec64(arg, &ts)) {
5490             return -TARGET_EFAULT;
5491         }
5492     }
5493 
5494     return ret;
5495 }
5496 
5497 #ifdef TIOCGPTPEER
5498 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5499                                      int fd, int cmd, abi_long arg)
5500 {
5501     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5502     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5503 }
5504 #endif
5505 
5506 #ifdef HAVE_DRM_H
5507 
5508 static void unlock_drm_version(struct drm_version *host_ver,
5509                                struct target_drm_version *target_ver,
5510                                bool copy)
5511 {
5512     unlock_user(host_ver->name, target_ver->name,
5513                                 copy ? host_ver->name_len : 0);
5514     unlock_user(host_ver->date, target_ver->date,
5515                                 copy ? host_ver->date_len : 0);
5516     unlock_user(host_ver->desc, target_ver->desc,
5517                                 copy ? host_ver->desc_len : 0);
5518 }
5519 
5520 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5521                                           struct target_drm_version *target_ver)
5522 {
5523     memset(host_ver, 0, sizeof(*host_ver));
5524 
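         /*
          * The guest supplies buffer lengths and pointers for name, date and
          * desc; lock each guest buffer for writing so the host ioctl can
          * fill it in directly.
          */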
5525     __get_user(host_ver->name_len, &target_ver->name_len);
5526     if (host_ver->name_len) {
5527         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5528                                    target_ver->name_len, 0);
5529         if (!host_ver->name) {
5530             return -EFAULT;
5531         }
5532     }
5533 
5534     __get_user(host_ver->date_len, &target_ver->date_len);
5535     if (host_ver->date_len) {
5536         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5537                                    target_ver->date_len, 0);
5538         if (!host_ver->date) {
5539             goto err;
5540         }
5541     }
5542 
5543     __get_user(host_ver->desc_len, &target_ver->desc_len);
5544     if (host_ver->desc_len) {
5545         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5546                                    target_ver->desc_len, 0);
5547         if (!host_ver->desc) {
5548             goto err;
5549         }
5550     }
5551 
5552     return 0;
5553 err:
5554     unlock_drm_version(host_ver, target_ver, false);
5555     return -EFAULT;
5556 }
5557 
5558 static inline void host_to_target_drmversion(
5559                                           struct target_drm_version *target_ver,
5560                                           struct drm_version *host_ver)
5561 {
5562     __put_user(host_ver->version_major, &target_ver->version_major);
5563     __put_user(host_ver->version_minor, &target_ver->version_minor);
5564     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5565     __put_user(host_ver->name_len, &target_ver->name_len);
5566     __put_user(host_ver->date_len, &target_ver->date_len);
5567     __put_user(host_ver->desc_len, &target_ver->desc_len);
5568     unlock_drm_version(host_ver, target_ver, true);
5569 }
5570 
5571 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5572                              int fd, int cmd, abi_long arg)
5573 {
5574     struct drm_version *ver;
5575     struct target_drm_version *target_ver;
5576     abi_long ret;
5577 
5578     switch (ie->host_cmd) {
5579     case DRM_IOCTL_VERSION:
5580         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5581             return -TARGET_EFAULT;
5582         }
5583         ver = (struct drm_version *)buf_temp;
5584         ret = target_to_host_drmversion(ver, target_ver);
5585         if (!is_error(ret)) {
5586             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5587             if (is_error(ret)) {
5588                 unlock_drm_version(ver, target_ver, false);
5589             } else {
5590                 host_to_target_drmversion(target_ver, ver);
5591             }
5592         }
5593         unlock_user_struct(target_ver, arg, 0);
5594         return ret;
5595     }
5596     return -TARGET_ENOSYS;
5597 }
5598 
5599 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5600                                            struct drm_i915_getparam *gparam,
5601                                            int fd, abi_long arg)
5602 {
5603     abi_long ret;
5604     int value;
5605     struct target_drm_i915_getparam *target_gparam;
5606 
5607     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5608         return -TARGET_EFAULT;
5609     }
5610 
5611     __get_user(gparam->param, &target_gparam->param);
5612     gparam->value = &value;
5613     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5614     put_user_s32(value, target_gparam->value);
5615 
5616     unlock_user_struct(target_gparam, arg, 0);
5617     return ret;
5618 }
5619 
5620 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5621                                   int fd, int cmd, abi_long arg)
5622 {
5623     switch (ie->host_cmd) {
5624     case DRM_IOCTL_I915_GETPARAM:
5625         return do_ioctl_drm_i915_getparam(ie,
5626                                           (struct drm_i915_getparam *)buf_temp,
5627                                           fd, arg);
5628     default:
5629         return -TARGET_ENOSYS;
5630     }
5631 }
5632 
5633 #endif
5634 
5635 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5636                                         int fd, int cmd, abi_long arg)
5637 {
5638     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5639     struct tun_filter *target_filter;
5640     char *target_addr;
5641 
5642     assert(ie->access == IOC_W);
5643 
5644     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5645     if (!target_filter) {
5646         return -TARGET_EFAULT;
5647     }
5648     filter->flags = tswap16(target_filter->flags);
5649     filter->count = tswap16(target_filter->count);
5650     unlock_user(target_filter, arg, 0);
5651 
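         /*
          * The filter header is followed by a variable-length array of MAC
          * addresses (count * ETH_ALEN bytes); make sure it still fits in
          * buf_temp before copying it from the guest.
          */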
5652     if (filter->count) {
5653         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5654             MAX_STRUCT_SIZE) {
5655             return -TARGET_EFAULT;
5656         }
5657 
5658         target_addr = lock_user(VERIFY_READ,
5659                                 arg + offsetof(struct tun_filter, addr),
5660                                 filter->count * ETH_ALEN, 1);
5661         if (!target_addr) {
5662             return -TARGET_EFAULT;
5663         }
5664         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5665         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5666     }
5667 
5668     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5669 }
5670 
5671 IOCTLEntry ioctl_entries[] = {
5672 #define IOCTL(cmd, access, ...) \
5673     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5674 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5675     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5676 #define IOCTL_IGNORE(cmd) \
5677     { TARGET_ ## cmd, 0, #cmd },
5678 #include "ioctls.h"
5679     { 0, 0, },
5680 };
5681 
5682 /* ??? Implement proper locking for ioctls.  */
5683 /* do_ioctl() Must return target values and target errnos. */
5684 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5685 {
5686     const IOCTLEntry *ie;
5687     const argtype *arg_type;
5688     abi_long ret;
5689     uint8_t buf_temp[MAX_STRUCT_SIZE];
5690     int target_size;
5691     void *argptr;
5692 
5693     ie = ioctl_entries;
5694     for (;;) {
5695         if (ie->target_cmd == 0) {
5696             qemu_log_mask(
5697                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5698             return -TARGET_ENOSYS;
5699         }
5700         if (ie->target_cmd == cmd)
5701             break;
5702         ie++;
5703     }
5704     arg_type = ie->arg_type;
5705     if (ie->do_ioctl) {
5706         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5707     } else if (!ie->host_cmd) {
5708         /* Some architectures define BSD ioctls in their headers
5709            that are not implemented in Linux.  */
5710         return -TARGET_ENOSYS;
5711     }
5712 
5713     switch(arg_type[0]) {
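         /*
          * The first argtype token describes how the argument is passed:
          * no argument, a plain integer forwarded unchanged, or a pointer to
          * a structure that must be thunk-converted between guest and host.
          */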
5714     case TYPE_NULL:
5715         /* no argument */
5716         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5717         break;
5718     case TYPE_PTRVOID:
5719     case TYPE_INT:
5720     case TYPE_LONG:
5721     case TYPE_ULONG:
5722         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5723         break;
5724     case TYPE_PTR:
5725         arg_type++;
5726         target_size = thunk_type_size(arg_type, 0);
5727         switch (ie->access) {
5728         case IOC_R:
5729             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5730             if (!is_error(ret)) {
5731                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5732                 if (!argptr)
5733                     return -TARGET_EFAULT;
5734                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5735                 unlock_user(argptr, arg, target_size);
5736             }
5737             break;
5738         case IOC_W:
5739             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5740             if (!argptr)
5741                 return -TARGET_EFAULT;
5742             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5743             unlock_user(argptr, arg, 0);
5744             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5745             break;
5746         default:
5747         case IOC_RW:
5748             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5749             if (!argptr)
5750                 return -TARGET_EFAULT;
5751             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5752             unlock_user(argptr, arg, 0);
5753             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5754             if (!is_error(ret)) {
5755                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5756                 if (!argptr)
5757                     return -TARGET_EFAULT;
5758                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5759                 unlock_user(argptr, arg, target_size);
5760             }
5761             break;
5762         }
5763         break;
5764     default:
5765         qemu_log_mask(LOG_UNIMP,
5766                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5767                       (long)cmd, arg_type[0]);
5768         ret = -TARGET_ENOSYS;
5769         break;
5770     }
5771     return ret;
5772 }
5773 
5774 static const bitmask_transtbl iflag_tbl[] = {
5775         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5776         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5777         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5778         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5779         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5780         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5781         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5782         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5783         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5784         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5785         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5786         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5787         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5788         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5789         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5790         { 0, 0, 0, 0 }
5791 };
5792 
5793 static const bitmask_transtbl oflag_tbl[] = {
5794 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5795 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5796 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5797 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5798 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5799 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5800 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5801 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5802 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5803 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5804 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5805 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5806 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5807 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5808 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5809 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5810 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5811 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5812 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5813 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5814 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5815 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5816 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5817 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5818 	{ 0, 0, 0, 0 }
5819 };
5820 
5821 static const bitmask_transtbl cflag_tbl[] = {
5822 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5823 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5824 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5825 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5826 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5827 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5828 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5829 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5830 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5831 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5832 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5833 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5834 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5835 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5836 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5837 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5838 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5839 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5840 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5841 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5842 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5843 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5844 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5845 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5846 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5847 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5848 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5849 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5850 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5851 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5852 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5853 	{ 0, 0, 0, 0 }
5854 };
5855 
5856 static const bitmask_transtbl lflag_tbl[] = {
5857   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5858   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5859   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5860   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5861   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5862   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5863   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5864   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5865   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5866   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5867   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5868   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5869   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5870   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5871   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5872   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5873   { 0, 0, 0, 0 }
5874 };
5875 
5876 static void target_to_host_termios (void *dst, const void *src)
5877 {
5878     struct host_termios *host = dst;
5879     const struct target_termios *target = src;
5880 
5881     host->c_iflag =
5882         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5883     host->c_oflag =
5884         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5885     host->c_cflag =
5886         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5887     host->c_lflag =
5888         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5889     host->c_line = target->c_line;
5890 
5891     memset(host->c_cc, 0, sizeof(host->c_cc));
5892     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5893     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5894     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5895     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5896     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5897     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5898     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5899     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5900     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5901     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5902     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5903     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5904     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5905     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5906     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5907     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5908     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5909 }
5910 
5911 static void host_to_target_termios (void *dst, const void *src)
5912 {
5913     struct target_termios *target = dst;
5914     const struct host_termios *host = src;
5915 
5916     target->c_iflag =
5917         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5918     target->c_oflag =
5919         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5920     target->c_cflag =
5921         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5922     target->c_lflag =
5923         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5924     target->c_line = host->c_line;
5925 
5926     memset(target->c_cc, 0, sizeof(target->c_cc));
5927     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5928     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5929     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5930     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5931     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5932     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5933     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5934     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5935     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5936     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5937     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5938     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5939     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5940     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5941     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5942     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5943     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5944 }
5945 
5946 static const StructEntry struct_termios_def = {
5947     .convert = { host_to_target_termios, target_to_host_termios },
5948     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5949     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5950     .print = print_termios,
5951 };
5952 
5953 static const bitmask_transtbl mmap_flags_tbl[] = {
5954     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5955     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5956     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5957     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5958       MAP_ANONYMOUS, MAP_ANONYMOUS },
5959     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5960       MAP_GROWSDOWN, MAP_GROWSDOWN },
5961     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5962       MAP_DENYWRITE, MAP_DENYWRITE },
5963     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5964       MAP_EXECUTABLE, MAP_EXECUTABLE },
5965     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5966     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5967       MAP_NORESERVE, MAP_NORESERVE },
5968     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5969     /* MAP_STACK had been ignored by the kernel for quite some time.
5970        Recognize it for the target insofar as we do not want to pass
5971        it through to the host.  */
5972     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5973     { 0, 0, 0, 0 }
5974 };
5975 
5976 /*
5977  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5978  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5979  */
5980 #if defined(TARGET_I386)
5981 
5982 /* NOTE: there is really one LDT for all the threads */
5983 static uint8_t *ldt_table;
5984 
5985 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5986 {
5987     int size;
5988     void *p;
5989 
5990     if (!ldt_table)
5991         return 0;
5992     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5993     if (size > bytecount)
5994         size = bytecount;
5995     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5996     if (!p)
5997         return -TARGET_EFAULT;
5998     /* ??? Should this be byteswapped?  */
5999     memcpy(p, ldt_table, size);
6000     unlock_user(p, ptr, size);
6001     return size;
6002 }
6003 
6004 /* XXX: add locking support */
6005 static abi_long write_ldt(CPUX86State *env,
6006                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6007 {
6008     struct target_modify_ldt_ldt_s ldt_info;
6009     struct target_modify_ldt_ldt_s *target_ldt_info;
6010     int seg_32bit, contents, read_exec_only, limit_in_pages;
6011     int seg_not_present, useable, lm;
6012     uint32_t *lp, entry_1, entry_2;
6013 
6014     if (bytecount != sizeof(ldt_info))
6015         return -TARGET_EINVAL;
6016     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6017         return -TARGET_EFAULT;
6018     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6019     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6020     ldt_info.limit = tswap32(target_ldt_info->limit);
6021     ldt_info.flags = tswap32(target_ldt_info->flags);
6022     unlock_user_struct(target_ldt_info, ptr, 0);
6023 
6024     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6025         return -TARGET_EINVAL;
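         /*
          * Decode the flag bits, which mirror the bitfield layout of the
          * kernel's struct user_desc (seg_32bit, contents, read_exec_only,
          * limit_in_pages, seg_not_present, useable and, on 64-bit, lm).
          */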
6026     seg_32bit = ldt_info.flags & 1;
6027     contents = (ldt_info.flags >> 1) & 3;
6028     read_exec_only = (ldt_info.flags >> 3) & 1;
6029     limit_in_pages = (ldt_info.flags >> 4) & 1;
6030     seg_not_present = (ldt_info.flags >> 5) & 1;
6031     useable = (ldt_info.flags >> 6) & 1;
6032 #ifdef TARGET_ABI32
6033     lm = 0;
6034 #else
6035     lm = (ldt_info.flags >> 7) & 1;
6036 #endif
6037     if (contents == 3) {
6038         if (oldmode)
6039             return -TARGET_EINVAL;
6040         if (seg_not_present == 0)
6041             return -TARGET_EINVAL;
6042     }
6043     /* allocate the LDT */
6044     if (!ldt_table) {
6045         env->ldt.base = target_mmap(0,
6046                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6047                                     PROT_READ|PROT_WRITE,
6048                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6049         if (env->ldt.base == -1)
6050             return -TARGET_ENOMEM;
6051         memset(g2h_untagged(env->ldt.base), 0,
6052                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6053         env->ldt.limit = 0xffff;
6054         ldt_table = g2h_untagged(env->ldt.base);
6055     }
6056 
6057     /* NOTE: same code as Linux kernel */
6058     /* Allow LDTs to be cleared by the user. */
6059     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6060         if (oldmode ||
6061             (contents == 0             &&
6062              read_exec_only == 1       &&
6063              seg_32bit == 0            &&
6064              limit_in_pages == 0       &&
6065              seg_not_present == 1      &&
6066              useable == 0)) {
6067             entry_1 = 0;
6068             entry_2 = 0;
6069             goto install;
6070         }
6071     }
6072 
6073     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6074         (ldt_info.limit & 0x0ffff);
6075     entry_2 = (ldt_info.base_addr & 0xff000000) |
6076         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6077         (ldt_info.limit & 0xf0000) |
6078         ((read_exec_only ^ 1) << 9) |
6079         (contents << 10) |
6080         ((seg_not_present ^ 1) << 15) |
6081         (seg_32bit << 22) |
6082         (limit_in_pages << 23) |
6083         (lm << 21) |
6084         0x7000;
6085     if (!oldmode)
6086         entry_2 |= (useable << 20);
6087 
6088     /* Install the new entry ...  */
6089 install:
6090     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6091     lp[0] = tswap32(entry_1);
6092     lp[1] = tswap32(entry_2);
6093     return 0;
6094 }
6095 
6096 /* specific and weird i386 syscalls */
6097 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6098                               unsigned long bytecount)
6099 {
6100     abi_long ret;
6101 
6102     switch (func) {
6103     case 0:
6104         ret = read_ldt(ptr, bytecount);
6105         break;
6106     case 1:
6107         ret = write_ldt(env, ptr, bytecount, 1);
6108         break;
6109     case 0x11:
6110         ret = write_ldt(env, ptr, bytecount, 0);
6111         break;
6112     default:
6113         ret = -TARGET_ENOSYS;
6114         break;
6115     }
6116     return ret;
6117 }
6118 
6119 #if defined(TARGET_ABI32)
6120 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6121 {
6122     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6123     struct target_modify_ldt_ldt_s ldt_info;
6124     struct target_modify_ldt_ldt_s *target_ldt_info;
6125     int seg_32bit, contents, read_exec_only, limit_in_pages;
6126     int seg_not_present, useable, lm;
6127     uint32_t *lp, entry_1, entry_2;
6128     int i;
6129 
6130     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6131     if (!target_ldt_info)
6132         return -TARGET_EFAULT;
6133     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6134     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6135     ldt_info.limit = tswap32(target_ldt_info->limit);
6136     ldt_info.flags = tswap32(target_ldt_info->flags);
6137     if (ldt_info.entry_number == -1) {
6138         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6139             if (gdt_table[i] == 0) {
6140                 ldt_info.entry_number = i;
6141                 target_ldt_info->entry_number = tswap32(i);
6142                 break;
6143             }
6144         }
6145     }
6146     unlock_user_struct(target_ldt_info, ptr, 1);
6147 
6148     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6149         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6150            return -TARGET_EINVAL;
6151     seg_32bit = ldt_info.flags & 1;
6152     contents = (ldt_info.flags >> 1) & 3;
6153     read_exec_only = (ldt_info.flags >> 3) & 1;
6154     limit_in_pages = (ldt_info.flags >> 4) & 1;
6155     seg_not_present = (ldt_info.flags >> 5) & 1;
6156     useable = (ldt_info.flags >> 6) & 1;
6157 #ifdef TARGET_ABI32
6158     lm = 0;
6159 #else
6160     lm = (ldt_info.flags >> 7) & 1;
6161 #endif
6162 
6163     if (contents == 3) {
6164         if (seg_not_present == 0)
6165             return -TARGET_EINVAL;
6166     }
6167 
6168     /* NOTE: same code as Linux kernel */
6169     /* Allow LDTs to be cleared by the user. */
6170     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6171         if ((contents == 0             &&
6172              read_exec_only == 1       &&
6173              seg_32bit == 0            &&
6174              limit_in_pages == 0       &&
6175              seg_not_present == 1      &&
6176              useable == 0 )) {
6177             entry_1 = 0;
6178             entry_2 = 0;
6179             goto install;
6180         }
6181     }
6182 
6183     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6184         (ldt_info.limit & 0x0ffff);
6185     entry_2 = (ldt_info.base_addr & 0xff000000) |
6186         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6187         (ldt_info.limit & 0xf0000) |
6188         ((read_exec_only ^ 1) << 9) |
6189         (contents << 10) |
6190         ((seg_not_present ^ 1) << 15) |
6191         (seg_32bit << 22) |
6192         (limit_in_pages << 23) |
6193         (useable << 20) |
6194         (lm << 21) |
6195         0x7000;
6196 
6197     /* Install the new entry ...  */
6198 install:
6199     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6200     lp[0] = tswap32(entry_1);
6201     lp[1] = tswap32(entry_2);
6202     return 0;
6203 }
6204 
6205 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6206 {
6207     struct target_modify_ldt_ldt_s *target_ldt_info;
6208     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6209     uint32_t base_addr, limit, flags;
6210     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6211     int seg_not_present, useable, lm;
6212     uint32_t *lp, entry_1, entry_2;
6213 
6214     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6215     if (!target_ldt_info)
6216         return -TARGET_EFAULT;
6217     idx = tswap32(target_ldt_info->entry_number);
6218     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6219         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6220         unlock_user_struct(target_ldt_info, ptr, 1);
6221         return -TARGET_EINVAL;
6222     }
6223     lp = (uint32_t *)(gdt_table + idx);
6224     entry_1 = tswap32(lp[0]);
6225     entry_2 = tswap32(lp[1]);
6226 
6227     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6228     contents = (entry_2 >> 10) & 3;
6229     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6230     seg_32bit = (entry_2 >> 22) & 1;
6231     limit_in_pages = (entry_2 >> 23) & 1;
6232     useable = (entry_2 >> 20) & 1;
6233 #ifdef TARGET_ABI32
6234     lm = 0;
6235 #else
6236     lm = (entry_2 >> 21) & 1;
6237 #endif
6238     flags = (seg_32bit << 0) | (contents << 1) |
6239         (read_exec_only << 3) | (limit_in_pages << 4) |
6240         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6241     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6242     base_addr = (entry_1 >> 16) |
6243         (entry_2 & 0xff000000) |
6244         ((entry_2 & 0xff) << 16);
6245     target_ldt_info->base_addr = tswapal(base_addr);
6246     target_ldt_info->limit = tswap32(limit);
6247     target_ldt_info->flags = tswap32(flags);
6248     unlock_user_struct(target_ldt_info, ptr, 1);
6249     return 0;
6250 }
6251 
6252 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6253 {
6254     return -TARGET_ENOSYS;
6255 }
6256 #else
6257 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6258 {
6259     abi_long ret = 0;
6260     abi_ulong val;
6261     int idx;
6262 
6263     switch(code) {
6264     case TARGET_ARCH_SET_GS:
6265     case TARGET_ARCH_SET_FS:
6266         if (code == TARGET_ARCH_SET_GS)
6267             idx = R_GS;
6268         else
6269             idx = R_FS;
6270         cpu_x86_load_seg(env, idx, 0);
6271         env->segs[idx].base = addr;
6272         break;
6273     case TARGET_ARCH_GET_GS:
6274     case TARGET_ARCH_GET_FS:
6275         if (code == TARGET_ARCH_GET_GS)
6276             idx = R_GS;
6277         else
6278             idx = R_FS;
6279         val = env->segs[idx].base;
6280         if (put_user(val, addr, abi_ulong))
6281             ret = -TARGET_EFAULT;
6282         break;
6283     default:
6284         ret = -TARGET_EINVAL;
6285         break;
6286     }
6287     return ret;
6288 }
6289 #endif /* defined(TARGET_ABI32) */
6290 
6291 #endif /* defined(TARGET_I386) */
6292 
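/* Host stack size for the pthread that backs a CLONE_VM guest thread. */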
6293 #define NEW_STACK_SIZE 0x40000
6294 
6295 
6296 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6297 typedef struct {
6298     CPUArchState *env;
6299     pthread_mutex_t mutex;
6300     pthread_cond_t cond;
6301     pthread_t thread;
6302     uint32_t tid;
6303     abi_ulong child_tidptr;
6304     abi_ulong parent_tidptr;
6305     sigset_t sigmask;
6306 } new_thread_info;
6307 
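/*
 * Entry point of the host thread that backs a guest CLONE_VM clone:
 * register the thread with RCU and TCG, publish the new TID to the
 * requested tidptr locations, unblock signals using the mask saved by
 * the parent, signal the parent that setup is complete and then enter
 * the guest cpu_loop().
 */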
6308 static void *clone_func(void *arg)
6309 {
6310     new_thread_info *info = arg;
6311     CPUArchState *env;
6312     CPUState *cpu;
6313     TaskState *ts;
6314 
6315     rcu_register_thread();
6316     tcg_register_thread();
6317     env = info->env;
6318     cpu = env_cpu(env);
6319     thread_cpu = cpu;
6320     ts = (TaskState *)cpu->opaque;
6321     info->tid = sys_gettid();
6322     task_settid(ts);
6323     if (info->child_tidptr)
6324         put_user_u32(info->tid, info->child_tidptr);
6325     if (info->parent_tidptr)
6326         put_user_u32(info->tid, info->parent_tidptr);
6327     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6328     /* Enable signals.  */
6329     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6330     /* Signal to the parent that we're ready.  */
6331     pthread_mutex_lock(&info->mutex);
6332     pthread_cond_broadcast(&info->cond);
6333     pthread_mutex_unlock(&info->mutex);
6334     /* Wait until the parent has finished initializing the tls state.  */
6335     pthread_mutex_lock(&clone_lock);
6336     pthread_mutex_unlock(&clone_lock);
6337     cpu_loop(env);
6338     /* never exits */
6339     return NULL;
6340 }
6341 
6342 /* do_fork() must return host values and target errnos (unlike most
6343    do_*() functions). */
6344 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6345                    abi_ulong parent_tidptr, target_ulong newtls,
6346                    abi_ulong child_tidptr)
6347 {
6348     CPUState *cpu = env_cpu(env);
6349     int ret;
6350     TaskState *ts;
6351     CPUState *new_cpu;
6352     CPUArchState *new_env;
6353     sigset_t sigmask;
6354 
6355     flags &= ~CLONE_IGNORED_FLAGS;
6356 
6357     /* Emulate vfork() with fork() */
6358     if (flags & CLONE_VFORK)
6359         flags &= ~(CLONE_VFORK | CLONE_VM);
6360 
6361     if (flags & CLONE_VM) {
6362         TaskState *parent_ts = (TaskState *)cpu->opaque;
6363         new_thread_info info;
6364         pthread_attr_t attr;
6365 
6366         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6367             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6368             return -TARGET_EINVAL;
6369         }
6370 
6371         ts = g_new0(TaskState, 1);
6372         init_task_state(ts);
6373 
6374         /* Grab a mutex so that thread setup appears atomic.  */
6375         pthread_mutex_lock(&clone_lock);
6376 
6377         /*
6378          * If this is our first additional thread, we need to ensure we
6379          * generate code for parallel execution and flush old translations.
6380          * Do this now so that the copy gets CF_PARALLEL too.
6381          */
6382         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6383             cpu->tcg_cflags |= CF_PARALLEL;
6384             tb_flush(cpu);
6385         }
6386 
6387         /* we create a new CPU instance. */
6388         new_env = cpu_copy(env);
6389         /* Init regs that differ from the parent.  */
6390         cpu_clone_regs_child(new_env, newsp, flags);
6391         cpu_clone_regs_parent(env, flags);
6392         new_cpu = env_cpu(new_env);
6393         new_cpu->opaque = ts;
6394         ts->bprm = parent_ts->bprm;
6395         ts->info = parent_ts->info;
6396         ts->signal_mask = parent_ts->signal_mask;
6397 
6398         if (flags & CLONE_CHILD_CLEARTID) {
6399             ts->child_tidptr = child_tidptr;
6400         }
6401 
6402         if (flags & CLONE_SETTLS) {
6403             cpu_set_tls (new_env, newtls);
6404         }
6405 
6406         memset(&info, 0, sizeof(info));
6407         pthread_mutex_init(&info.mutex, NULL);
6408         pthread_mutex_lock(&info.mutex);
6409         pthread_cond_init(&info.cond, NULL);
6410         info.env = new_env;
6411         if (flags & CLONE_CHILD_SETTID) {
6412             info.child_tidptr = child_tidptr;
6413         }
6414         if (flags & CLONE_PARENT_SETTID) {
6415             info.parent_tidptr = parent_tidptr;
6416         }
6417 
6418         ret = pthread_attr_init(&attr);
6419         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6420         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6421         /* It is not safe to deliver signals until the child has finished
6422            initializing, so temporarily block all signals.  */
6423         sigfillset(&sigmask);
6424         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6425         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6426 
6427         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6428         /* TODO: Free new CPU state if thread creation failed.  */
6429 
6430         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6431         pthread_attr_destroy(&attr);
6432         if (ret == 0) {
6433             /* Wait for the child to initialize.  */
6434             pthread_cond_wait(&info.cond, &info.mutex);
6435             ret = info.tid;
6436         } else {
6437             ret = -1;
6438         }
6439         pthread_mutex_unlock(&info.mutex);
6440         pthread_cond_destroy(&info.cond);
6441         pthread_mutex_destroy(&info.mutex);
6442         pthread_mutex_unlock(&clone_lock);
6443     } else {
6444         /* Without CLONE_VM, we consider this a fork. */
6445         if (flags & CLONE_INVALID_FORK_FLAGS) {
6446             return -TARGET_EINVAL;
6447         }
6448 
6449         /* We can't support custom termination signals */
6450         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6451             return -TARGET_EINVAL;
6452         }
6453 
6454         if (block_signals()) {
6455             return -TARGET_ERESTARTSYS;
6456         }
6457 
6458         fork_start();
6459         ret = fork();
6460         if (ret == 0) {
6461             /* Child Process.  */
6462             cpu_clone_regs_child(env, newsp, flags);
6463             fork_end(1);
6464             /* There is a race condition here.  The parent process could
6465                theoretically read the TID in the child process before the child
6466                tid is set.  This would require using either ptrace
6467                (not implemented) or having *_tidptr point at a shared memory
6468                mapping.  We can't repeat the spinlock hack used above because
6469                the child process gets its own copy of the lock.  */
6470             if (flags & CLONE_CHILD_SETTID)
6471                 put_user_u32(sys_gettid(), child_tidptr);
6472             if (flags & CLONE_PARENT_SETTID)
6473                 put_user_u32(sys_gettid(), parent_tidptr);
6474             ts = (TaskState *)cpu->opaque;
6475             if (flags & CLONE_SETTLS)
6476                 cpu_set_tls (env, newtls);
6477             if (flags & CLONE_CHILD_CLEARTID)
6478                 ts->child_tidptr = child_tidptr;
6479         } else {
6480             cpu_clone_regs_parent(env, flags);
6481             fork_end(0);
6482         }
6483     }
6484     return ret;
6485 }
6486 
6487 /* Warning: does not handle Linux-specific flags... */
6488 static int target_to_host_fcntl_cmd(int cmd)
6489 {
6490     int ret;
6491 
6492     switch(cmd) {
6493     case TARGET_F_DUPFD:
6494     case TARGET_F_GETFD:
6495     case TARGET_F_SETFD:
6496     case TARGET_F_GETFL:
6497     case TARGET_F_SETFL:
6498     case TARGET_F_OFD_GETLK:
6499     case TARGET_F_OFD_SETLK:
6500     case TARGET_F_OFD_SETLKW:
6501         ret = cmd;
6502         break;
6503     case TARGET_F_GETLK:
6504         ret = F_GETLK64;
6505         break;
6506     case TARGET_F_SETLK:
6507         ret = F_SETLK64;
6508         break;
6509     case TARGET_F_SETLKW:
6510         ret = F_SETLKW64;
6511         break;
6512     case TARGET_F_GETOWN:
6513         ret = F_GETOWN;
6514         break;
6515     case TARGET_F_SETOWN:
6516         ret = F_SETOWN;
6517         break;
6518     case TARGET_F_GETSIG:
6519         ret = F_GETSIG;
6520         break;
6521     case TARGET_F_SETSIG:
6522         ret = F_SETSIG;
6523         break;
6524 #if TARGET_ABI_BITS == 32
6525     case TARGET_F_GETLK64:
6526         ret = F_GETLK64;
6527         break;
6528     case TARGET_F_SETLK64:
6529         ret = F_SETLK64;
6530         break;
6531     case TARGET_F_SETLKW64:
6532         ret = F_SETLKW64;
6533         break;
6534 #endif
6535     case TARGET_F_SETLEASE:
6536         ret = F_SETLEASE;
6537         break;
6538     case TARGET_F_GETLEASE:
6539         ret = F_GETLEASE;
6540         break;
6541 #ifdef F_DUPFD_CLOEXEC
6542     case TARGET_F_DUPFD_CLOEXEC:
6543         ret = F_DUPFD_CLOEXEC;
6544         break;
6545 #endif
6546     case TARGET_F_NOTIFY:
6547         ret = F_NOTIFY;
6548         break;
6549 #ifdef F_GETOWN_EX
6550     case TARGET_F_GETOWN_EX:
6551         ret = F_GETOWN_EX;
6552         break;
6553 #endif
6554 #ifdef F_SETOWN_EX
6555     case TARGET_F_SETOWN_EX:
6556         ret = F_SETOWN_EX;
6557         break;
6558 #endif
6559 #ifdef F_SETPIPE_SZ
6560     case TARGET_F_SETPIPE_SZ:
6561         ret = F_SETPIPE_SZ;
6562         break;
6563     case TARGET_F_GETPIPE_SZ:
6564         ret = F_GETPIPE_SZ;
6565         break;
6566 #endif
6567 #ifdef F_ADD_SEALS
6568     case TARGET_F_ADD_SEALS:
6569         ret = F_ADD_SEALS;
6570         break;
6571     case TARGET_F_GET_SEALS:
6572         ret = F_GET_SEALS;
6573         break;
6574 #endif
6575     default:
6576         ret = -TARGET_EINVAL;
6577         break;
6578     }
6579 
6580 #if defined(__powerpc64__)
6581     /* On PPC64, the glibc headers define the F_*LK* constants as 12, 13 and
6582      * 14, which the kernel does not support. The glibc fcntl() wrapper
6583      * adjusts them to 5, 6 and 7 before making the syscall. Since we make
6584      * the syscall directly, adjust to what the kernel supports.
6585      */
6586     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6587         ret -= F_GETLK64 - 5;
6588     }
6589 #endif
6590 
6591     return ret;
6592 }
6593 
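/*
 * Translate flock lock types between target and host.  FLOCK_TRANSTBL
 * expands to a switch whose cases are generated by TRANSTBL_CONVERT,
 * which is redefined below for each direction of the conversion.
 */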
6594 #define FLOCK_TRANSTBL \
6595     switch (type) { \
6596     TRANSTBL_CONVERT(F_RDLCK); \
6597     TRANSTBL_CONVERT(F_WRLCK); \
6598     TRANSTBL_CONVERT(F_UNLCK); \
6599     }
6600 
6601 static int target_to_host_flock(int type)
6602 {
6603 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6604     FLOCK_TRANSTBL
6605 #undef  TRANSTBL_CONVERT
6606     return -TARGET_EINVAL;
6607 }
6608 
6609 static int host_to_target_flock(int type)
6610 {
6611 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6612     FLOCK_TRANSTBL
6613 #undef  TRANSTBL_CONVERT
6614     /* if we don't know how to convert the value coming
6615      * from the host, copy it to the target field as-is
6616      */
6617     return type;
6618 }
6619 
6620 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6621                                             abi_ulong target_flock_addr)
6622 {
6623     struct target_flock *target_fl;
6624     int l_type;
6625 
6626     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6627         return -TARGET_EFAULT;
6628     }
6629 
6630     __get_user(l_type, &target_fl->l_type);
6631     l_type = target_to_host_flock(l_type);
6632     if (l_type < 0) {
6633         return l_type;
6634     }
6635     fl->l_type = l_type;
6636     __get_user(fl->l_whence, &target_fl->l_whence);
6637     __get_user(fl->l_start, &target_fl->l_start);
6638     __get_user(fl->l_len, &target_fl->l_len);
6639     __get_user(fl->l_pid, &target_fl->l_pid);
6640     unlock_user_struct(target_fl, target_flock_addr, 0);
6641     return 0;
6642 }
6643 
6644 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6645                                           const struct flock64 *fl)
6646 {
6647     struct target_flock *target_fl;
6648     short l_type;
6649 
6650     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6651         return -TARGET_EFAULT;
6652     }
6653 
6654     l_type = host_to_target_flock(fl->l_type);
6655     __put_user(l_type, &target_fl->l_type);
6656     __put_user(fl->l_whence, &target_fl->l_whence);
6657     __put_user(fl->l_start, &target_fl->l_start);
6658     __put_user(fl->l_len, &target_fl->l_len);
6659     __put_user(fl->l_pid, &target_fl->l_pid);
6660     unlock_user_struct(target_fl, target_flock_addr, 1);
6661     return 0;
6662 }
6663 
6664 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6665 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6666 
6667 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6668 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6669                                                    abi_ulong target_flock_addr)
6670 {
6671     struct target_oabi_flock64 *target_fl;
6672     int l_type;
6673 
6674     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6675         return -TARGET_EFAULT;
6676     }
6677 
6678     __get_user(l_type, &target_fl->l_type);
6679     l_type = target_to_host_flock(l_type);
6680     if (l_type < 0) {
6681         return l_type;
6682     }
6683     fl->l_type = l_type;
6684     __get_user(fl->l_whence, &target_fl->l_whence);
6685     __get_user(fl->l_start, &target_fl->l_start);
6686     __get_user(fl->l_len, &target_fl->l_len);
6687     __get_user(fl->l_pid, &target_fl->l_pid);
6688     unlock_user_struct(target_fl, target_flock_addr, 0);
6689     return 0;
6690 }
6691 
6692 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6693                                                  const struct flock64 *fl)
6694 {
6695     struct target_oabi_flock64 *target_fl;
6696     short l_type;
6697 
6698     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6699         return -TARGET_EFAULT;
6700     }
6701 
6702     l_type = host_to_target_flock(fl->l_type);
6703     __put_user(l_type, &target_fl->l_type);
6704     __put_user(fl->l_whence, &target_fl->l_whence);
6705     __put_user(fl->l_start, &target_fl->l_start);
6706     __put_user(fl->l_len, &target_fl->l_len);
6707     __put_user(fl->l_pid, &target_fl->l_pid);
6708     unlock_user_struct(target_fl, target_flock_addr, 1);
6709     return 0;
6710 }
6711 #endif
6712 
6713 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6714                                               abi_ulong target_flock_addr)
6715 {
6716     struct target_flock64 *target_fl;
6717     int l_type;
6718 
6719     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6720         return -TARGET_EFAULT;
6721     }
6722 
6723     __get_user(l_type, &target_fl->l_type);
6724     l_type = target_to_host_flock(l_type);
6725     if (l_type < 0) {
6726         return l_type;
6727     }
6728     fl->l_type = l_type;
6729     __get_user(fl->l_whence, &target_fl->l_whence);
6730     __get_user(fl->l_start, &target_fl->l_start);
6731     __get_user(fl->l_len, &target_fl->l_len);
6732     __get_user(fl->l_pid, &target_fl->l_pid);
6733     unlock_user_struct(target_fl, target_flock_addr, 0);
6734     return 0;
6735 }
6736 
6737 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6738                                             const struct flock64 *fl)
6739 {
6740     struct target_flock64 *target_fl;
6741     short l_type;
6742 
6743     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6744         return -TARGET_EFAULT;
6745     }
6746 
6747     l_type = host_to_target_flock(fl->l_type);
6748     __put_user(l_type, &target_fl->l_type);
6749     __put_user(fl->l_whence, &target_fl->l_whence);
6750     __put_user(fl->l_start, &target_fl->l_start);
6751     __put_user(fl->l_len, &target_fl->l_len);
6752     __put_user(fl->l_pid, &target_fl->l_pid);
6753     unlock_user_struct(target_fl, target_flock_addr, 1);
6754     return 0;
6755 }
6756 
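/*
 * Emulate fcntl(): translate the command with target_to_host_fcntl_cmd()
 * and convert any pointed-to structures (struct flock, f_owner_ex),
 * signal numbers and status-flag bitmasks between guest and host
 * representations around the host fcntl() call.
 */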
6757 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6758 {
6759     struct flock64 fl64;
6760 #ifdef F_GETOWN_EX
6761     struct f_owner_ex fox;
6762     struct target_f_owner_ex *target_fox;
6763 #endif
6764     abi_long ret;
6765     int host_cmd = target_to_host_fcntl_cmd(cmd);
6766 
6767     if (host_cmd == -TARGET_EINVAL)
6768         return host_cmd;
6769 
6770     switch(cmd) {
6771     case TARGET_F_GETLK:
6772         ret = copy_from_user_flock(&fl64, arg);
6773         if (ret) {
6774             return ret;
6775         }
6776         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6777         if (ret == 0) {
6778             ret = copy_to_user_flock(arg, &fl64);
6779         }
6780         break;
6781 
6782     case TARGET_F_SETLK:
6783     case TARGET_F_SETLKW:
6784         ret = copy_from_user_flock(&fl64, arg);
6785         if (ret) {
6786             return ret;
6787         }
6788         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6789         break;
6790 
6791     case TARGET_F_GETLK64:
6792     case TARGET_F_OFD_GETLK:
6793         ret = copy_from_user_flock64(&fl64, arg);
6794         if (ret) {
6795             return ret;
6796         }
6797         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6798         if (ret == 0) {
6799             ret = copy_to_user_flock64(arg, &fl64);
6800         }
6801         break;
6802     case TARGET_F_SETLK64:
6803     case TARGET_F_SETLKW64:
6804     case TARGET_F_OFD_SETLK:
6805     case TARGET_F_OFD_SETLKW:
6806         ret = copy_from_user_flock64(&fl64, arg);
6807         if (ret) {
6808             return ret;
6809         }
6810         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
6811         break;
6812 
6813     case TARGET_F_GETFL:
6814         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6815         if (ret >= 0) {
6816             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
6817         }
6818         break;
6819 
6820     case TARGET_F_SETFL:
6821         ret = get_errno(safe_fcntl(fd, host_cmd,
6822                                    target_to_host_bitmask(arg,
6823                                                           fcntl_flags_tbl)));
6824         break;
6825 
6826 #ifdef F_GETOWN_EX
6827     case TARGET_F_GETOWN_EX:
6828         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6829         if (ret >= 0) {
6830             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
6831                 return -TARGET_EFAULT;
6832             target_fox->type = tswap32(fox.type);
6833             target_fox->pid = tswap32(fox.pid);
6834             unlock_user_struct(target_fox, arg, 1);
6835         }
6836         break;
6837 #endif
6838 
6839 #ifdef F_SETOWN_EX
6840     case TARGET_F_SETOWN_EX:
6841         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
6842             return -TARGET_EFAULT;
6843         fox.type = tswap32(target_fox->type);
6844         fox.pid = tswap32(target_fox->pid);
6845         unlock_user_struct(target_fox, arg, 0);
6846         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
6847         break;
6848 #endif
6849 
6850     case TARGET_F_SETSIG:
6851         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
6852         break;
6853 
6854     case TARGET_F_GETSIG:
6855         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
6856         break;
6857 
6858     case TARGET_F_SETOWN:
6859     case TARGET_F_GETOWN:
6860     case TARGET_F_SETLEASE:
6861     case TARGET_F_GETLEASE:
6862     case TARGET_F_SETPIPE_SZ:
6863     case TARGET_F_GETPIPE_SZ:
6864     case TARGET_F_ADD_SEALS:
6865     case TARGET_F_GET_SEALS:
6866         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
6867         break;
6868 
6869     default:
6870         ret = get_errno(safe_fcntl(fd, cmd, arg));
6871         break;
6872     }
6873     return ret;
6874 }
6875 
6876 #ifdef USE_UID16
6877 
6878 static inline int high2lowuid(int uid)
6879 {
6880     if (uid > 65535)
6881         return 65534;
6882     else
6883         return uid;
6884 }
6885 
6886 static inline int high2lowgid(int gid)
6887 {
6888     if (gid > 65535)
6889         return 65534;
6890     else
6891         return gid;
6892 }
6893 
6894 static inline int low2highuid(int uid)
6895 {
6896     if ((int16_t)uid == -1)
6897         return -1;
6898     else
6899         return uid;
6900 }
6901 
6902 static inline int low2highgid(int gid)
6903 {
6904     if ((int16_t)gid == -1)
6905         return -1;
6906     else
6907         return gid;
6908 }
6909 static inline int tswapid(int id)
6910 {
6911     return tswap16(id);
6912 }
6913 
6914 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6915 
6916 #else /* !USE_UID16 */
6917 static inline int high2lowuid(int uid)
6918 {
6919     return uid;
6920 }
6921 static inline int high2lowgid(int gid)
6922 {
6923     return gid;
6924 }
6925 static inline int low2highuid(int uid)
6926 {
6927     return uid;
6928 }
6929 static inline int low2highgid(int gid)
6930 {
6931     return gid;
6932 }
6933 static inline int tswapid(int id)
6934 {
6935     return tswap32(id);
6936 }
6937 
6938 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6939 
6940 #endif /* USE_UID16 */
6941 
6942 /* We must do direct syscalls for setting UID/GID, because we want to
6943  * implement the Linux system call semantics of "change only for this thread",
6944  * not the libc/POSIX semantics of "change for all threads in process".
6945  * (See http://ewontfix.com/17/ for more details.)
6946  * We use the 32-bit version of the syscalls if present; if it is not
6947  * then either the host architecture supports 32-bit UIDs natively with
6948  * the standard syscall, or the 16-bit UID is the best we can do.
6949  */
6950 #ifdef __NR_setuid32
6951 #define __NR_sys_setuid __NR_setuid32
6952 #else
6953 #define __NR_sys_setuid __NR_setuid
6954 #endif
6955 #ifdef __NR_setgid32
6956 #define __NR_sys_setgid __NR_setgid32
6957 #else
6958 #define __NR_sys_setgid __NR_setgid
6959 #endif
6960 #ifdef __NR_setresuid32
6961 #define __NR_sys_setresuid __NR_setresuid32
6962 #else
6963 #define __NR_sys_setresuid __NR_setresuid
6964 #endif
6965 #ifdef __NR_setresgid32
6966 #define __NR_sys_setresgid __NR_setresgid32
6967 #else
6968 #define __NR_sys_setresgid __NR_setresgid
6969 #endif
6970 
6971 _syscall1(int, sys_setuid, uid_t, uid)
6972 _syscall1(int, sys_setgid, gid_t, gid)
6973 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6974 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6975 
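/*
 * One-time initialisation: register the structure layouts used by the
 * thunk conversion code and patch the encoded size field of ioctl
 * numbers that are declared with a placeholder size.
 */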
6976 void syscall_init(void)
6977 {
6978     IOCTLEntry *ie;
6979     const argtype *arg_type;
6980     int size;
6981 
6982     thunk_init(STRUCT_MAX);
6983 
6984 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
6985 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
6986 #include "syscall_types.h"
6987 #undef STRUCT
6988 #undef STRUCT_SPECIAL
6989 
6990     /* We patch the ioctl size if necessary. We rely on the fact that
6991        no ioctl has all bits set in its size field */
6992     ie = ioctl_entries;
6993     while (ie->target_cmd != 0) {
6994         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
6995             TARGET_IOC_SIZEMASK) {
6996             arg_type = ie->arg_type;
6997             if (arg_type[0] != TYPE_PTR) {
6998                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
6999                         ie->target_cmd);
7000                 exit(1);
7001             }
7002             arg_type++;
7003             size = thunk_type_size(arg_type, 0);
7004             ie->target_cmd = (ie->target_cmd &
7005                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7006                 (size << TARGET_IOC_SIZESHIFT);
7007         }
7008 
7009         /* automatic consistency check if same arch */
7010 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7011     (defined(__x86_64__) && defined(TARGET_X86_64))
7012         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7013             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7014                     ie->name, ie->target_cmd, ie->host_cmd);
7015         }
7016 #endif
7017         ie++;
7018     }
7019 }
7020 
7021 #ifdef TARGET_NR_truncate64
7022 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7023                                          abi_long arg2,
7024                                          abi_long arg3,
7025                                          abi_long arg4)
7026 {
7027     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7028         arg2 = arg3;
7029         arg3 = arg4;
7030     }
7031     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7032 }
7033 #endif
7034 
7035 #ifdef TARGET_NR_ftruncate64
7036 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7037                                           abi_long arg2,
7038                                           abi_long arg3,
7039                                           abi_long arg4)
7040 {
7041     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7042         arg2 = arg3;
7043         arg3 = arg4;
7044     }
7045     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7046 }
7047 #endif
7048 
7049 #if defined(TARGET_NR_timer_settime) || \
7050     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7051 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7052                                                  abi_ulong target_addr)
7053 {
7054     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7055                                 offsetof(struct target_itimerspec,
7056                                          it_interval)) ||
7057         target_to_host_timespec(&host_its->it_value, target_addr +
7058                                 offsetof(struct target_itimerspec,
7059                                          it_value))) {
7060         return -TARGET_EFAULT;
7061     }
7062 
7063     return 0;
7064 }
7065 #endif
7066 
7067 #if defined(TARGET_NR_timer_settime64) || \
7068     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7069 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7070                                                    abi_ulong target_addr)
7071 {
7072     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7073                                   offsetof(struct target__kernel_itimerspec,
7074                                            it_interval)) ||
7075         target_to_host_timespec64(&host_its->it_value, target_addr +
7076                                   offsetof(struct target__kernel_itimerspec,
7077                                            it_value))) {
7078         return -TARGET_EFAULT;
7079     }
7080 
7081     return 0;
7082 }
7083 #endif
7084 
7085 #if ((defined(TARGET_NR_timerfd_gettime) || \
7086       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7087       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7088 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7089                                                  struct itimerspec *host_its)
7090 {
7091     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7092                                                        it_interval),
7093                                 &host_its->it_interval) ||
7094         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7095                                                        it_value),
7096                                 &host_its->it_value)) {
7097         return -TARGET_EFAULT;
7098     }
7099     return 0;
7100 }
7101 #endif
7102 
7103 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7104       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7105       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7106 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7107                                                    struct itimerspec *host_its)
7108 {
7109     if (host_to_target_timespec64(target_addr +
7110                                   offsetof(struct target__kernel_itimerspec,
7111                                            it_interval),
7112                                   &host_its->it_interval) ||
7113         host_to_target_timespec64(target_addr +
7114                                   offsetof(struct target__kernel_itimerspec,
7115                                            it_value),
7116                                   &host_its->it_value)) {
7117         return -TARGET_EFAULT;
7118     }
7119     return 0;
7120 }
7121 #endif
7122 
7123 #if defined(TARGET_NR_adjtimex) || \
7124     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7125 static inline abi_long target_to_host_timex(struct timex *host_tx,
7126                                             abi_long target_addr)
7127 {
7128     struct target_timex *target_tx;
7129 
7130     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7131         return -TARGET_EFAULT;
7132     }
7133 
7134     __get_user(host_tx->modes, &target_tx->modes);
7135     __get_user(host_tx->offset, &target_tx->offset);
7136     __get_user(host_tx->freq, &target_tx->freq);
7137     __get_user(host_tx->maxerror, &target_tx->maxerror);
7138     __get_user(host_tx->esterror, &target_tx->esterror);
7139     __get_user(host_tx->status, &target_tx->status);
7140     __get_user(host_tx->constant, &target_tx->constant);
7141     __get_user(host_tx->precision, &target_tx->precision);
7142     __get_user(host_tx->tolerance, &target_tx->tolerance);
7143     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7144     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7145     __get_user(host_tx->tick, &target_tx->tick);
7146     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7147     __get_user(host_tx->jitter, &target_tx->jitter);
7148     __get_user(host_tx->shift, &target_tx->shift);
7149     __get_user(host_tx->stabil, &target_tx->stabil);
7150     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7151     __get_user(host_tx->calcnt, &target_tx->calcnt);
7152     __get_user(host_tx->errcnt, &target_tx->errcnt);
7153     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7154     __get_user(host_tx->tai, &target_tx->tai);
7155 
7156     unlock_user_struct(target_tx, target_addr, 0);
7157     return 0;
7158 }
7159 
7160 static inline abi_long host_to_target_timex(abi_long target_addr,
7161                                             struct timex *host_tx)
7162 {
7163     struct target_timex *target_tx;
7164 
7165     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7166         return -TARGET_EFAULT;
7167     }
7168 
7169     __put_user(host_tx->modes, &target_tx->modes);
7170     __put_user(host_tx->offset, &target_tx->offset);
7171     __put_user(host_tx->freq, &target_tx->freq);
7172     __put_user(host_tx->maxerror, &target_tx->maxerror);
7173     __put_user(host_tx->esterror, &target_tx->esterror);
7174     __put_user(host_tx->status, &target_tx->status);
7175     __put_user(host_tx->constant, &target_tx->constant);
7176     __put_user(host_tx->precision, &target_tx->precision);
7177     __put_user(host_tx->tolerance, &target_tx->tolerance);
7178     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7179     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7180     __put_user(host_tx->tick, &target_tx->tick);
7181     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7182     __put_user(host_tx->jitter, &target_tx->jitter);
7183     __put_user(host_tx->shift, &target_tx->shift);
7184     __put_user(host_tx->stabil, &target_tx->stabil);
7185     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7186     __put_user(host_tx->calcnt, &target_tx->calcnt);
7187     __put_user(host_tx->errcnt, &target_tx->errcnt);
7188     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7189     __put_user(host_tx->tai, &target_tx->tai);
7190 
7191     unlock_user_struct(target_tx, target_addr, 1);
7192     return 0;
7193 }
7194 #endif
7195 
7196 
7197 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7198 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7199                                               abi_long target_addr)
7200 {
7201     struct target__kernel_timex *target_tx;
7202 
7203     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7204                                  offsetof(struct target__kernel_timex,
7205                                           time))) {
7206         return -TARGET_EFAULT;
7207     }
7208 
7209     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7210         return -TARGET_EFAULT;
7211     }
7212 
7213     __get_user(host_tx->modes, &target_tx->modes);
7214     __get_user(host_tx->offset, &target_tx->offset);
7215     __get_user(host_tx->freq, &target_tx->freq);
7216     __get_user(host_tx->maxerror, &target_tx->maxerror);
7217     __get_user(host_tx->esterror, &target_tx->esterror);
7218     __get_user(host_tx->status, &target_tx->status);
7219     __get_user(host_tx->constant, &target_tx->constant);
7220     __get_user(host_tx->precision, &target_tx->precision);
7221     __get_user(host_tx->tolerance, &target_tx->tolerance);
7222     __get_user(host_tx->tick, &target_tx->tick);
7223     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7224     __get_user(host_tx->jitter, &target_tx->jitter);
7225     __get_user(host_tx->shift, &target_tx->shift);
7226     __get_user(host_tx->stabil, &target_tx->stabil);
7227     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7228     __get_user(host_tx->calcnt, &target_tx->calcnt);
7229     __get_user(host_tx->errcnt, &target_tx->errcnt);
7230     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7231     __get_user(host_tx->tai, &target_tx->tai);
7232 
7233     unlock_user_struct(target_tx, target_addr, 0);
7234     return 0;
7235 }
7236 
7237 static inline abi_long host_to_target_timex64(abi_long target_addr,
7238                                               struct timex *host_tx)
7239 {
7240     struct target__kernel_timex *target_tx;
7241 
7242     if (copy_to_user_timeval64(target_addr +
7243                                offsetof(struct target__kernel_timex, time),
7244                                &host_tx->time)) {
7245         return -TARGET_EFAULT;
7246     }
7247 
7248     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7249         return -TARGET_EFAULT;
7250     }
7251 
7252     __put_user(host_tx->modes, &target_tx->modes);
7253     __put_user(host_tx->offset, &target_tx->offset);
7254     __put_user(host_tx->freq, &target_tx->freq);
7255     __put_user(host_tx->maxerror, &target_tx->maxerror);
7256     __put_user(host_tx->esterror, &target_tx->esterror);
7257     __put_user(host_tx->status, &target_tx->status);
7258     __put_user(host_tx->constant, &target_tx->constant);
7259     __put_user(host_tx->precision, &target_tx->precision);
7260     __put_user(host_tx->tolerance, &target_tx->tolerance);
7261     __put_user(host_tx->tick, &target_tx->tick);
7262     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7263     __put_user(host_tx->jitter, &target_tx->jitter);
7264     __put_user(host_tx->shift, &target_tx->shift);
7265     __put_user(host_tx->stabil, &target_tx->stabil);
7266     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7267     __put_user(host_tx->calcnt, &target_tx->calcnt);
7268     __put_user(host_tx->errcnt, &target_tx->errcnt);
7269     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7270     __put_user(host_tx->tai, &target_tx->tai);
7271 
7272     unlock_user_struct(target_tx, target_addr, 1);
7273     return 0;
7274 }
7275 #endif
7276 
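/*
 * Older libc headers do not expose sigev_notify_thread_id; in that case
 * fall back to the union member it aliases in glibc's struct sigevent.
 */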
7277 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7278 #define sigev_notify_thread_id _sigev_un._tid
7279 #endif
7280 
7281 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7282                                                abi_ulong target_addr)
7283 {
7284     struct target_sigevent *target_sevp;
7285 
7286     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7287         return -TARGET_EFAULT;
7288     }
7289 
7290     /* This union is awkward on 64 bit systems because it has a 32 bit
7291      * integer and a pointer in it; we follow the conversion approach
7292      * used for handling sigval types in signal.c so the guest should get
7293      * the correct value back even if we did a 64 bit byteswap and it's
7294      * using the 32 bit integer.
7295      */
7296     host_sevp->sigev_value.sival_ptr =
7297         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7298     host_sevp->sigev_signo =
7299         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7300     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7301     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7302 
7303     unlock_user_struct(target_sevp, target_addr, 1);
7304     return 0;
7305 }
7306 
7307 #if defined(TARGET_NR_mlockall)
7308 static inline int target_to_host_mlockall_arg(int arg)
7309 {
7310     int result = 0;
7311 
7312     if (arg & TARGET_MCL_CURRENT) {
7313         result |= MCL_CURRENT;
7314     }
7315     if (arg & TARGET_MCL_FUTURE) {
7316         result |= MCL_FUTURE;
7317     }
7318 #ifdef MCL_ONFAULT
7319     if (arg & TARGET_MCL_ONFAULT) {
7320         result |= MCL_ONFAULT;
7321     }
7322 #endif
7323 
7324     return result;
7325 }
7326 #endif
7327 
7328 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7329      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7330      defined(TARGET_NR_newfstatat))
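/*
 * Convert a host struct stat into the target's stat64 layout.  The ARM
 * EABI variant is handled separately because its structure layout
 * differs from the generic target_stat64.
 */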
7331 static inline abi_long host_to_target_stat64(void *cpu_env,
7332                                              abi_ulong target_addr,
7333                                              struct stat *host_st)
7334 {
7335 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7336     if (((CPUARMState *)cpu_env)->eabi) {
7337         struct target_eabi_stat64 *target_st;
7338 
7339         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7340             return -TARGET_EFAULT;
7341         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7342         __put_user(host_st->st_dev, &target_st->st_dev);
7343         __put_user(host_st->st_ino, &target_st->st_ino);
7344 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7345         __put_user(host_st->st_ino, &target_st->__st_ino);
7346 #endif
7347         __put_user(host_st->st_mode, &target_st->st_mode);
7348         __put_user(host_st->st_nlink, &target_st->st_nlink);
7349         __put_user(host_st->st_uid, &target_st->st_uid);
7350         __put_user(host_st->st_gid, &target_st->st_gid);
7351         __put_user(host_st->st_rdev, &target_st->st_rdev);
7352         __put_user(host_st->st_size, &target_st->st_size);
7353         __put_user(host_st->st_blksize, &target_st->st_blksize);
7354         __put_user(host_st->st_blocks, &target_st->st_blocks);
7355         __put_user(host_st->st_atime, &target_st->target_st_atime);
7356         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7357         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7358 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7359         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7360         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7361         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7362 #endif
7363         unlock_user_struct(target_st, target_addr, 1);
7364     } else
7365 #endif
7366     {
7367 #if defined(TARGET_HAS_STRUCT_STAT64)
7368         struct target_stat64 *target_st;
7369 #else
7370         struct target_stat *target_st;
7371 #endif
7372 
7373         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7374             return -TARGET_EFAULT;
7375         memset(target_st, 0, sizeof(*target_st));
7376         __put_user(host_st->st_dev, &target_st->st_dev);
7377         __put_user(host_st->st_ino, &target_st->st_ino);
7378 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7379         __put_user(host_st->st_ino, &target_st->__st_ino);
7380 #endif
7381         __put_user(host_st->st_mode, &target_st->st_mode);
7382         __put_user(host_st->st_nlink, &target_st->st_nlink);
7383         __put_user(host_st->st_uid, &target_st->st_uid);
7384         __put_user(host_st->st_gid, &target_st->st_gid);
7385         __put_user(host_st->st_rdev, &target_st->st_rdev);
7386         /* XXX: better use of kernel struct */
7387         __put_user(host_st->st_size, &target_st->st_size);
7388         __put_user(host_st->st_blksize, &target_st->st_blksize);
7389         __put_user(host_st->st_blocks, &target_st->st_blocks);
7390         __put_user(host_st->st_atime, &target_st->target_st_atime);
7391         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7392         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7393 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7394         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7395         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7396         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7397 #endif
7398         unlock_user_struct(target_st, target_addr, 1);
7399     }
7400 
7401     return 0;
7402 }
7403 #endif
7404 
7405 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7406 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7407                                             abi_ulong target_addr)
7408 {
7409     struct target_statx *target_stx;
7410 
7411     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7412         return -TARGET_EFAULT;
7413     }
7414     memset(target_stx, 0, sizeof(*target_stx));
7415 
7416     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7417     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7418     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7419     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7420     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7421     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7422     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7423     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7424     __put_user(host_stx->stx_size, &target_stx->stx_size);
7425     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7426     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7427     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7428     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7429     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7430     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7431     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7432     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7433     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7434     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7435     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7436     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7437     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7438     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7439 
7440     unlock_user_struct(target_stx, target_addr, 1);
7441 
7442     return 0;
7443 }
7444 #endif
7445 
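/*
 * Pick the host futex syscall: 64-bit hosts always have a 64-bit time_t
 * and use __NR_futex; 32-bit hosts prefer __NR_futex_time64 when the
 * host timespec tv_sec is 64-bit and otherwise fall back to the old
 * __NR_futex.
 */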
7446 static int do_sys_futex(int *uaddr, int op, int val,
7447                          const struct timespec *timeout, int *uaddr2,
7448                          int val3)
7449 {
7450 #if HOST_LONG_BITS == 64
7451 #if defined(__NR_futex)
7452     /* time_t is always 64-bit here, so no _time64 variant is defined */
7453     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7454 
7455 #endif
7456 #else /* HOST_LONG_BITS == 64 */
7457 #if defined(__NR_futex_time64)
7458     if (sizeof(timeout->tv_sec) == 8) {
7459         /* _time64 function on 32bit arch */
7460         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7461     }
7462 #endif
7463 #if defined(__NR_futex)
7464     /* old function on 32bit arch */
7465     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7466 #endif
7467 #endif /* HOST_LONG_BITS == 64 */
7468     g_assert_not_reached();
7469 }
7470 
7471 static int do_safe_futex(int *uaddr, int op, int val,
7472                          const struct timespec *timeout, int *uaddr2,
7473                          int val3)
7474 {
7475 #if HOST_LONG_BITS == 64
7476 #if defined(__NR_futex)
7477     /* always a 64-bit time_t, it doesn't define _time64 version  */
7478     /* time_t is always 64-bit here, so no _time64 variant is defined */
7479 #endif
7480 #else /* HOST_LONG_BITS == 64 */
7481 #if defined(__NR_futex_time64)
7482     if (sizeof(timeout->tv_sec) == 8) {
7483         /* _time64 function on 32bit arch */
7484         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7485                                            val3));
7486     }
7487 #endif
7488 #if defined(__NR_futex)
7489     /* old function on 32bit arch */
7490     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7491 #endif
7492 #endif /* HOST_LONG_BITS == 64 */
7493     return -TARGET_ENOSYS;
7494 }
7495 
7496 /* ??? Using host futex calls even when target atomic operations
7497    are not really atomic probably breaks things.  However, implementing
7498    futexes locally would make futexes shared between multiple processes
7499    tricky.  In any case they're probably useless, because guest atomic
7500    operations won't work either.  */
7501 #if defined(TARGET_NR_futex)
7502 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7503                     target_ulong timeout, target_ulong uaddr2, int val3)
7504 {
7505     struct timespec ts, *pts;
7506     int base_op;
7507 
7508     /* ??? We assume FUTEX_* constants are the same on both host
7509        and target.  */
7510 #ifdef FUTEX_CMD_MASK
7511     base_op = op & FUTEX_CMD_MASK;
7512 #else
7513     base_op = op;
7514 #endif
7515     switch (base_op) {
7516     case FUTEX_WAIT:
7517     case FUTEX_WAIT_BITSET:
7518         if (timeout) {
7519             pts = &ts;
7520             target_to_host_timespec(pts, timeout);
7521         } else {
7522             pts = NULL;
7523         }
7524         return do_safe_futex(g2h(cpu, uaddr),
7525                              op, tswap32(val), pts, NULL, val3);
7526     case FUTEX_WAKE:
7527         return do_safe_futex(g2h(cpu, uaddr),
7528                              op, val, NULL, NULL, 0);
7529     case FUTEX_FD:
7530         return do_safe_futex(g2h(cpu, uaddr),
7531                              op, val, NULL, NULL, 0);
7532     case FUTEX_REQUEUE:
7533     case FUTEX_CMP_REQUEUE:
7534     case FUTEX_WAKE_OP:
7535         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7536            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7537            But the prototype takes a `struct timespec *'; insert casts
7538            to satisfy the compiler.  We do not need to tswap TIMEOUT
7539            since it's not compared to guest memory.  */
7540         pts = (struct timespec *)(uintptr_t) timeout;
7541         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7542                              (base_op == FUTEX_CMP_REQUEUE
7543                               ? tswap32(val3) : val3));
7544     default:
7545         return -TARGET_ENOSYS;
7546     }
7547 }
7548 #endif
7549 
7550 #if defined(TARGET_NR_futex_time64)
7551 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7552                            int val, target_ulong timeout,
7553                            target_ulong uaddr2, int val3)
7554 {
7555     struct timespec ts, *pts;
7556     int base_op;
7557 
7558     /* ??? We assume FUTEX_* constants are the same on both host
7559        and target.  */
7560 #ifdef FUTEX_CMD_MASK
7561     base_op = op & FUTEX_CMD_MASK;
7562 #else
7563     base_op = op;
7564 #endif
7565     switch (base_op) {
7566     case FUTEX_WAIT:
7567     case FUTEX_WAIT_BITSET:
7568         if (timeout) {
7569             pts = &ts;
7570             if (target_to_host_timespec64(pts, timeout)) {
7571                 return -TARGET_EFAULT;
7572             }
7573         } else {
7574             pts = NULL;
7575         }
7576         return do_safe_futex(g2h(cpu, uaddr), op,
7577                              tswap32(val), pts, NULL, val3);
7578     case FUTEX_WAKE:
7579         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7580     case FUTEX_FD:
7581         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7582     case FUTEX_REQUEUE:
7583     case FUTEX_CMP_REQUEUE:
7584     case FUTEX_WAKE_OP:
7585         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7586            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7587            But the prototype takes a `struct timespec *'; insert casts
7588            to satisfy the compiler.  We do not need to tswap TIMEOUT
7589            since it's not compared to guest memory.  */
7590         pts = (struct timespec *)(uintptr_t) timeout;
7591         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7592                              (base_op == FUTEX_CMP_REQUEUE
7593                               ? tswap32(val3) : val3));
7594     default:
7595         return -TARGET_ENOSYS;
7596     }
7597 }
7598 #endif
7599 
7600 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
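/*
 * name_to_handle_at(): read the caller-supplied handle_bytes, let the
 * host fill in an opaque struct file_handle, then copy it back to the
 * guest with the handle_bytes and handle_type header fields byteswapped
 * and the mount id stored to mount_id.
 */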
7601 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7602                                      abi_long handle, abi_long mount_id,
7603                                      abi_long flags)
7604 {
7605     struct file_handle *target_fh;
7606     struct file_handle *fh;
7607     int mid = 0;
7608     abi_long ret;
7609     char *name;
7610     unsigned int size, total_size;
7611 
7612     if (get_user_s32(size, handle)) {
7613         return -TARGET_EFAULT;
7614     }
7615 
7616     name = lock_user_string(pathname);
7617     if (!name) {
7618         return -TARGET_EFAULT;
7619     }
7620 
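    /*
     * struct file_handle ends in a flexible f_handle[] byte array, so the
     * space needed is the fixed header plus the guest-supplied handle_bytes.
     */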
7621     total_size = sizeof(struct file_handle) + size;
7622     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7623     if (!target_fh) {
7624         unlock_user(name, pathname, 0);
7625         return -TARGET_EFAULT;
7626     }
7627 
7628     fh = g_malloc0(total_size);
7629     fh->handle_bytes = size;
7630 
7631     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7632     unlock_user(name, pathname, 0);
7633 
7634     /* man name_to_handle_at(2):
7635      * Other than the use of the handle_bytes field, the caller should treat
7636      * the file_handle structure as an opaque data type
7637      */
7638 
7639     memcpy(target_fh, fh, total_size);
7640     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7641     target_fh->handle_type = tswap32(fh->handle_type);
7642     g_free(fh);
7643     unlock_user(target_fh, handle, total_size);
7644 
7645     if (put_user_s32(mid, mount_id)) {
7646         return -TARGET_EFAULT;
7647     }
7648 
7649     return ret;
7650 
7651 }
7652 #endif
7653 
7654 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7655 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7656                                      abi_long flags)
7657 {
7658     struct file_handle *target_fh;
7659     struct file_handle *fh;
7660     unsigned int size, total_size;
7661     abi_long ret;
7662 
7663     if (get_user_s32(size, handle)) {
7664         return -TARGET_EFAULT;
7665     }
7666 
7667     total_size = sizeof(struct file_handle) + size;
7668     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7669     if (!target_fh) {
7670         return -TARGET_EFAULT;
7671     }
7672 
7673     fh = g_memdup(target_fh, total_size);
7674     fh->handle_bytes = size;
7675     fh->handle_type = tswap32(target_fh->handle_type);
7676 
7677     ret = get_errno(open_by_handle_at(mount_fd, fh,
7678                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7679 
7680     g_free(fh);
7681 
7682     unlock_user(target_fh, handle, total_size);
7683 
7684     return ret;
7685 }
7686 #endif
7687 
7688 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7689 
7690 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7691 {
7692     int host_flags;
7693     target_sigset_t *target_mask;
7694     sigset_t host_mask;
7695     abi_long ret;
7696 
7697     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7698         return -TARGET_EINVAL;
7699     }
7700     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7701         return -TARGET_EFAULT;
7702     }
7703 
7704     target_to_host_sigset(&host_mask, target_mask);
7705 
7706     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7707 
7708     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7709     if (ret >= 0) {
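        /* register a translator so reads return a target-format signalfd_siginfo */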
7710         fd_trans_register(ret, &target_signalfd_trans);
7711     }
7712 
7713     unlock_user_struct(target_mask, mask, 0);
7714 
7715     return ret;
7716 }
7717 #endif
7718 
7719 /* Map host to target signal numbers for the wait family of syscalls.
7720    Assume all other status bits are the same.  */
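/*
 * For example (assuming the usual wait status layout), a child killed by
 * host SIGUSR1 carries that signal number in the low 7 bits; only the
 * signal number is translated, while the core-dump bit and any exit-code
 * bits pass through unchanged.
 */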
7721 int host_to_target_waitstatus(int status)
7722 {
7723     if (WIFSIGNALED(status)) {
7724         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7725     }
7726     if (WIFSTOPPED(status)) {
7727         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7728                | (status & 0xff);
7729     }
7730     return status;
7731 }
7732 
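/*
 * Emulate /proc/self/cmdline: write each of the guest's argv[] strings,
 * including the terminating '\0' counted by strlen()+1, which matches the
 * NUL-separated format of the real proc file.
 */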
7733 static int open_self_cmdline(void *cpu_env, int fd)
7734 {
7735     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7736     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7737     int i;
7738 
7739     for (i = 0; i < bprm->argc; i++) {
7740         size_t len = strlen(bprm->argv[i]) + 1;
7741 
7742         if (write(fd, bprm->argv[i], len) != len) {
7743             return -1;
7744         }
7745     }
7746 
7747     return 0;
7748 }
7749 
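/*
 * Emulate /proc/self/maps: walk the host's own mappings, keep only ranges
 * that are valid guest addresses, and print them rewritten as guest
 * addresses in the usual maps format, e.g. (illustrative values only):
 *     00400000-0040b000 r-xp 00000000 08:01 1234       /path/to/binary
 */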
7750 static int open_self_maps(void *cpu_env, int fd)
7751 {
7752     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7753     TaskState *ts = cpu->opaque;
7754     GSList *map_info = read_self_maps();
7755     GSList *s;
7756     int count;
7757 
7758     for (s = map_info; s; s = g_slist_next(s)) {
7759         MapInfo *e = (MapInfo *) s->data;
7760 
7761         if (h2g_valid(e->start)) {
7762             unsigned long min = e->start;
7763             unsigned long max = e->end;
7764             int flags = page_get_flags(h2g(min));
7765             const char *path;
7766 
7767             max = h2g_valid(max - 1) ?
7768                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
7769 
7770             if (page_check_range(h2g(min), max - min, flags) == -1) {
7771                 continue;
7772             }
7773 
7774             if (h2g(min) == ts->info->stack_limit) {
7775                 path = "[stack]";
7776             } else {
7777                 path = e->path;
7778             }
7779 
7780             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
7781                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
7782                             h2g(min), h2g(max - 1) + 1,
7783                             (flags & PAGE_READ) ? 'r' : '-',
7784                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
7785                             (flags & PAGE_EXEC) ? 'x' : '-',
7786                             e->is_priv ? 'p' : '-',
7787                             (uint64_t) e->offset, e->dev, e->inode);
7788             if (path) {
7789                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
7790             } else {
7791                 dprintf(fd, "\n");
7792             }
7793         }
7794     }
7795 
7796     free_self_maps(map_info);
7797 
7798 #ifdef TARGET_VSYSCALL_PAGE
7799     /*
7800      * We only support execution from the vsyscall page.
7801      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
7802      */
7803     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
7804                     " --xp 00000000 00:00 0",
7805                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
7806     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
7807 #endif
7808 
7809     return 0;
7810 }
7811 
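/*
 * Emulate /proc/self/stat: emit 44 space-separated fields, of which only
 * pid (field 1), comm (field 2), ppid (field 4) and startstack (field 28)
 * are filled in; every other field is reported as 0.
 */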
7812 static int open_self_stat(void *cpu_env, int fd)
7813 {
7814     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7815     TaskState *ts = cpu->opaque;
7816     g_autoptr(GString) buf = g_string_new(NULL);
7817     int i;
7818 
7819     for (i = 0; i < 44; i++) {
7820         if (i == 0) {
7821             /* pid */
7822             g_string_printf(buf, FMT_pid " ", getpid());
7823         } else if (i == 1) {
7824             /* app name */
7825             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7826             bin = bin ? bin + 1 : ts->bprm->argv[0];
7827             g_string_printf(buf, "(%.15s) ", bin);
7828         } else if (i == 3) {
7829             /* ppid */
7830             g_string_printf(buf, FMT_pid " ", getppid());
7831         } else if (i == 27) {
7832             /* stack bottom */
7833             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7834         } else {
7835             /* for the rest, there is MasterCard */
7836             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7837         }
7838 
7839         if (write(fd, buf->str, buf->len) != buf->len) {
7840             return -1;
7841         }
7842     }
7843 
7844     return 0;
7845 }
7846 
7847 static int open_self_auxv(void *cpu_env, int fd)
7848 {
7849     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7850     TaskState *ts = cpu->opaque;
7851     abi_ulong auxv = ts->info->saved_auxv;
7852     abi_ulong len = ts->info->auxv_len;
7853     char *ptr;
7854 
7855     /*
7856      * The auxiliary vector is stored on the target process stack.
7857      * Read the whole auxv vector and copy it to the file.
7858      */
7859     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7860     if (ptr != NULL) {
7861         while (len > 0) {
7862             ssize_t r;
7863             r = write(fd, ptr, len);
7864             if (r <= 0) {
7865                 break;
7866             }
7867             len -= r;
7868             ptr += r;
7869         }
7870         lseek(fd, 0, SEEK_SET);
7871         unlock_user(ptr, auxv, len);
7872     }
7873 
7874     return 0;
7875 }
7876 
7877 static int is_proc_myself(const char *filename, const char *entry)
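/*
 * Return 1 if FILENAME refers to ENTRY in this process's own /proc
 * directory.  For example, is_proc_myself("/proc/self/exe", "exe") and
 * "/proc/<pid>/exe" (when <pid> is our own pid) both match; any other
 * path returns 0.
 */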
7878 {
7879     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
7880         filename += strlen("/proc/");
7881         if (!strncmp(filename, "self/", strlen("self/"))) {
7882             filename += strlen("self/");
7883         } else if (*filename >= '1' && *filename <= '9') {
7884             char myself[80];
7885             snprintf(myself, sizeof(myself), "%d/", getpid());
7886             if (!strncmp(filename, myself, strlen(myself))) {
7887                 filename += strlen(myself);
7888             } else {
7889                 return 0;
7890             }
7891         } else {
7892             return 0;
7893         }
7894         if (!strcmp(filename, entry)) {
7895             return 1;
7896         }
7897     }
7898     return 0;
7899 }
7900 
7901 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7902     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
7903 static int is_proc(const char *filename, const char *entry)
7904 {
7905     return strcmp(filename, entry) == 0;
7906 }
7907 #endif
7908 
7909 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7910 static int open_net_route(void *cpu_env, int fd)
7911 {
7912     FILE *fp;
7913     char *line = NULL;
7914     size_t len = 0;
7915     ssize_t read;
7916 
7917     fp = fopen("/proc/net/route", "r");
7918     if (fp == NULL) {
7919         return -1;
7920     }
7921 
7922     /* read header */
7923 
7924     read = getline(&line, &len, fp);
     if (read != -1) {
7925         dprintf(fd, "%s", line);
     }
7926 
7927     /* read routes */
7928 
7929     while ((read = getline(&line, &len, fp)) != -1) {
7930         char iface[16];
7931         uint32_t dest, gw, mask;
7932         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7933         int fields;
7934 
7935         fields = sscanf(line,
7936                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7937                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7938                         &mask, &mtu, &window, &irtt);
7939         if (fields != 11) {
7940             continue;
7941         }
7942         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7943                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7944                 metric, tswap32(mask), mtu, window, irtt);
7945     }
7946 
7947     free(line);
7948     fclose(fp);
7949 
7950     return 0;
7951 }
7952 #endif
7953 
7954 #if defined(TARGET_SPARC)
7955 static int open_cpuinfo(void *cpu_env, int fd)
7956 {
7957     dprintf(fd, "type\t\t: sun4u\n");
7958     return 0;
7959 }
7960 #endif
7961 
7962 #if defined(TARGET_HPPA)
7963 static int open_cpuinfo(void *cpu_env, int fd)
7964 {
7965     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
7966     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
7967     dprintf(fd, "capabilities\t: os32\n");
7968     dprintf(fd, "model\t\t: 9000/778/B160L\n");
7969     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
7970     return 0;
7971 }
7972 #endif
7973 
7974 #if defined(TARGET_M68K)
7975 static int open_hardware(void *cpu_env, int fd)
7976 {
7977     dprintf(fd, "Model:\t\tqemu-m68k\n");
7978     return 0;
7979 }
7980 #endif
7981 
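/*
 * openat() emulation: most paths go straight to the host, but a few /proc
 * files whose contents must describe the guest (maps, stat, auxv, cmdline,
 * and some arch-specific entries) are faked by writing emulated contents
 * into an unlinked temporary file and returning that fd instead.
 */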
7982 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
7983 {
7984     struct fake_open {
7985         const char *filename;
7986         int (*fill)(void *cpu_env, int fd);
7987         int (*cmp)(const char *s1, const char *s2);
7988     };
7989     const struct fake_open *fake_open;
7990     static const struct fake_open fakes[] = {
7991         { "maps", open_self_maps, is_proc_myself },
7992         { "stat", open_self_stat, is_proc_myself },
7993         { "auxv", open_self_auxv, is_proc_myself },
7994         { "cmdline", open_self_cmdline, is_proc_myself },
7995 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7996         { "/proc/net/route", open_net_route, is_proc },
7997 #endif
7998 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
7999         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8000 #endif
8001 #if defined(TARGET_M68K)
8002         { "/proc/hardware", open_hardware, is_proc },
8003 #endif
8004         { NULL, NULL, NULL }
8005     };
8006 
8007     if (is_proc_myself(pathname, "exe")) {
8008         int execfd = qemu_getauxval(AT_EXECFD);
8009         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8010     }
8011 
8012     for (fake_open = fakes; fake_open->filename; fake_open++) {
8013         if (fake_open->cmp(pathname, fake_open->filename)) {
8014             break;
8015         }
8016     }
8017 
8018     if (fake_open->filename) {
8019         const char *tmpdir;
8020         char filename[PATH_MAX];
8021         int fd, r;
8022 
8023         /* create a temporary file and fill it with the emulated contents */
8024         tmpdir = getenv("TMPDIR");
8025         if (!tmpdir)
8026             tmpdir = "/tmp";
8027         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8028         fd = mkstemp(filename);
8029         if (fd < 0) {
8030             return fd;
8031         }
8032         unlink(filename);
8033 
8034         if ((r = fake_open->fill(cpu_env, fd))) {
8035             int e = errno;
8036             close(fd);
8037             errno = e;
8038             return r;
8039         }
8040         lseek(fd, 0, SEEK_SET);
8041 
8042         return fd;
8043     }
8044 
8045     return safe_openat(dirfd, path(pathname), flags, mode);
8046 }
8047 
8048 #define TIMER_MAGIC 0x0caf0000
8049 #define TIMER_MAGIC_MASK 0xffff0000
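/*
 * Example: the timer kept at index 3 of g_posix_timers is exposed to the
 * guest as the opaque ID 0x0caf0003; get_timer_id() verifies the magic in
 * the upper 16 bits and recovers the index from the lower 16 bits.
 */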
8050 
8051 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8052 static target_timer_t get_timer_id(abi_long arg)
8053 {
8054     target_timer_t timerid = arg;
8055 
8056     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8057         return -TARGET_EINVAL;
8058     }
8059 
8060     timerid &= 0xffff;
8061 
8062     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8063         return -TARGET_EINVAL;
8064     }
8065 
8066     return timerid;
8067 }
8068 
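/*
 * Helpers used by the sched_getaffinity()/sched_setaffinity() emulation:
 * copy a CPU bitmask between the guest's abi_ulong words and the host's
 * unsigned long words.  Bit j of target word i is absolute CPU number
 * i * 8 * sizeof(abi_ulong) + j, so e.g. with a 32-bit guest on a 64-bit
 * host, bit 5 of target word 1 maps to bit 37 of host word 0.
 */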
8069 static int target_to_host_cpu_mask(unsigned long *host_mask,
8070                                    size_t host_size,
8071                                    abi_ulong target_addr,
8072                                    size_t target_size)
8073 {
8074     unsigned target_bits = sizeof(abi_ulong) * 8;
8075     unsigned host_bits = sizeof(*host_mask) * 8;
8076     abi_ulong *target_mask;
8077     unsigned i, j;
8078 
8079     assert(host_size >= target_size);
8080 
8081     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8082     if (!target_mask) {
8083         return -TARGET_EFAULT;
8084     }
8085     memset(host_mask, 0, host_size);
8086 
8087     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8088         unsigned bit = i * target_bits;
8089         abi_ulong val;
8090 
8091         __get_user(val, &target_mask[i]);
8092         for (j = 0; j < target_bits; j++, bit++) {
8093             if (val & (1UL << j)) {
8094                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8095             }
8096         }
8097     }
8098 
8099     unlock_user(target_mask, target_addr, 0);
8100     return 0;
8101 }
8102 
8103 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8104                                    size_t host_size,
8105                                    abi_ulong target_addr,
8106                                    size_t target_size)
8107 {
8108     unsigned target_bits = sizeof(abi_ulong) * 8;
8109     unsigned host_bits = sizeof(*host_mask) * 8;
8110     abi_ulong *target_mask;
8111     unsigned i, j;
8112 
8113     assert(host_size >= target_size);
8114 
8115     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8116     if (!target_mask) {
8117         return -TARGET_EFAULT;
8118     }
8119 
8120     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8121         unsigned bit = i * target_bits;
8122         abi_ulong val = 0;
8123 
8124         for (j = 0; j < target_bits; j++, bit++) {
8125             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8126                 val |= 1UL << j;
8127             }
8128         }
8129         __put_user(val, &target_mask[i]);
8130     }
8131 
8132     unlock_user(target_mask, target_addr, target_size);
8133     return 0;
8134 }
8135 
8136 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8137 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8138 #endif
8139 
8140 /* This is an internal helper for do_syscall so that there is a single
8141  * return point, at which actions such as logging of syscall results
8142  * can be performed.
8143  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8144  */
8145 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8146                             abi_long arg2, abi_long arg3, abi_long arg4,
8147                             abi_long arg5, abi_long arg6, abi_long arg7,
8148                             abi_long arg8)
8149 {
8150     CPUState *cpu = env_cpu(cpu_env);
8151     abi_long ret;
8152 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8153     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8154     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8155     || defined(TARGET_NR_statx)
8156     struct stat st;
8157 #endif
8158 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8159     || defined(TARGET_NR_fstatfs)
8160     struct statfs stfs;
8161 #endif
8162     void *p;
8163 
8164     switch(num) {
8165     case TARGET_NR_exit:
8166         /* In old applications this may be used to implement _exit(2).
8167            However in threaded applications it is used for thread termination,
8168            and _exit_group is used for application termination.
8169            Do thread termination if we have more then one thread.  */
8170 
8171         if (block_signals()) {
8172             return -TARGET_ERESTARTSYS;
8173         }
8174 
8175         pthread_mutex_lock(&clone_lock);
8176 
8177         if (CPU_NEXT(first_cpu)) {
8178             TaskState *ts = cpu->opaque;
8179 
8180             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8181             object_unref(OBJECT(cpu));
8182             /*
8183              * At this point the CPU should be unrealized and removed
8184              * from cpu lists. We can clean-up the rest of the thread
8185              * data without the lock held.
8186              */
8187 
8188             pthread_mutex_unlock(&clone_lock);
8189 
8190             if (ts->child_tidptr) {
8191                 put_user_u32(0, ts->child_tidptr);
8192                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8193                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8194             }
8195             thread_cpu = NULL;
8196             g_free(ts);
8197             rcu_unregister_thread();
8198             pthread_exit(NULL);
8199         }
8200 
8201         pthread_mutex_unlock(&clone_lock);
8202         preexit_cleanup(cpu_env, arg1);
8203         _exit(arg1);
8204         return 0; /* avoid warning */
8205     case TARGET_NR_read:
8206         if (arg2 == 0 && arg3 == 0) {
8207             return get_errno(safe_read(arg1, 0, 0));
8208         } else {
8209             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8210                 return -TARGET_EFAULT;
8211             ret = get_errno(safe_read(arg1, p, arg3));
8212             if (ret >= 0 &&
8213                 fd_trans_host_to_target_data(arg1)) {
8214                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8215             }
8216             unlock_user(p, arg2, ret);
8217         }
8218         return ret;
8219     case TARGET_NR_write:
8220         if (arg2 == 0 && arg3 == 0) {
8221             return get_errno(safe_write(arg1, 0, 0));
8222         }
8223         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8224             return -TARGET_EFAULT;
8225         if (fd_trans_target_to_host_data(arg1)) {
8226             void *copy = g_malloc(arg3);
8227             memcpy(copy, p, arg3);
8228             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8229             if (ret >= 0) {
8230                 ret = get_errno(safe_write(arg1, copy, ret));
8231             }
8232             g_free(copy);
8233         } else {
8234             ret = get_errno(safe_write(arg1, p, arg3));
8235         }
8236         unlock_user(p, arg2, 0);
8237         return ret;
8238 
8239 #ifdef TARGET_NR_open
8240     case TARGET_NR_open:
8241         if (!(p = lock_user_string(arg1)))
8242             return -TARGET_EFAULT;
8243         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8244                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8245                                   arg3));
8246         fd_trans_unregister(ret);
8247         unlock_user(p, arg1, 0);
8248         return ret;
8249 #endif
8250     case TARGET_NR_openat:
8251         if (!(p = lock_user_string(arg2)))
8252             return -TARGET_EFAULT;
8253         ret = get_errno(do_openat(cpu_env, arg1, p,
8254                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8255                                   arg4));
8256         fd_trans_unregister(ret);
8257         unlock_user(p, arg2, 0);
8258         return ret;
8259 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8260     case TARGET_NR_name_to_handle_at:
8261         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8262         return ret;
8263 #endif
8264 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8265     case TARGET_NR_open_by_handle_at:
8266         ret = do_open_by_handle_at(arg1, arg2, arg3);
8267         fd_trans_unregister(ret);
8268         return ret;
8269 #endif
8270     case TARGET_NR_close:
8271         fd_trans_unregister(arg1);
8272         return get_errno(close(arg1));
8273 
8274     case TARGET_NR_brk:
8275         return do_brk(arg1);
8276 #ifdef TARGET_NR_fork
8277     case TARGET_NR_fork:
8278         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8279 #endif
8280 #ifdef TARGET_NR_waitpid
8281     case TARGET_NR_waitpid:
8282         {
8283             int status;
8284             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8285             if (!is_error(ret) && arg2 && ret
8286                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8287                 return -TARGET_EFAULT;
8288         }
8289         return ret;
8290 #endif
8291 #ifdef TARGET_NR_waitid
8292     case TARGET_NR_waitid:
8293         {
8294             siginfo_t info;
8295             info.si_pid = 0;
8296             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8297             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8298                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8299                     return -TARGET_EFAULT;
8300                 host_to_target_siginfo(p, &info);
8301                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8302             }
8303         }
8304         return ret;
8305 #endif
8306 #ifdef TARGET_NR_creat /* not on alpha */
8307     case TARGET_NR_creat:
8308         if (!(p = lock_user_string(arg1)))
8309             return -TARGET_EFAULT;
8310         ret = get_errno(creat(p, arg2));
8311         fd_trans_unregister(ret);
8312         unlock_user(p, arg1, 0);
8313         return ret;
8314 #endif
8315 #ifdef TARGET_NR_link
8316     case TARGET_NR_link:
8317         {
8318             void * p2;
8319             p = lock_user_string(arg1);
8320             p2 = lock_user_string(arg2);
8321             if (!p || !p2)
8322                 ret = -TARGET_EFAULT;
8323             else
8324                 ret = get_errno(link(p, p2));
8325             unlock_user(p2, arg2, 0);
8326             unlock_user(p, arg1, 0);
8327         }
8328         return ret;
8329 #endif
8330 #if defined(TARGET_NR_linkat)
8331     case TARGET_NR_linkat:
8332         {
8333             void * p2 = NULL;
8334             if (!arg2 || !arg4)
8335                 return -TARGET_EFAULT;
8336             p  = lock_user_string(arg2);
8337             p2 = lock_user_string(arg4);
8338             if (!p || !p2)
8339                 ret = -TARGET_EFAULT;
8340             else
8341                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8342             unlock_user(p, arg2, 0);
8343             unlock_user(p2, arg4, 0);
8344         }
8345         return ret;
8346 #endif
8347 #ifdef TARGET_NR_unlink
8348     case TARGET_NR_unlink:
8349         if (!(p = lock_user_string(arg1)))
8350             return -TARGET_EFAULT;
8351         ret = get_errno(unlink(p));
8352         unlock_user(p, arg1, 0);
8353         return ret;
8354 #endif
8355 #if defined(TARGET_NR_unlinkat)
8356     case TARGET_NR_unlinkat:
8357         if (!(p = lock_user_string(arg2)))
8358             return -TARGET_EFAULT;
8359         ret = get_errno(unlinkat(arg1, p, arg3));
8360         unlock_user(p, arg2, 0);
8361         return ret;
8362 #endif
8363     case TARGET_NR_execve:
8364         {
8365             char **argp, **envp;
8366             int argc, envc;
8367             abi_ulong gp;
8368             abi_ulong guest_argp;
8369             abi_ulong guest_envp;
8370             abi_ulong addr;
8371             char **q;
8372 
8373             argc = 0;
8374             guest_argp = arg2;
8375             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8376                 if (get_user_ual(addr, gp))
8377                     return -TARGET_EFAULT;
8378                 if (!addr)
8379                     break;
8380                 argc++;
8381             }
8382             envc = 0;
8383             guest_envp = arg3;
8384             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8385                 if (get_user_ual(addr, gp))
8386                     return -TARGET_EFAULT;
8387                 if (!addr)
8388                     break;
8389                 envc++;
8390             }
8391 
8392             argp = g_new0(char *, argc + 1);
8393             envp = g_new0(char *, envc + 1);
8394 
8395             for (gp = guest_argp, q = argp; gp;
8396                   gp += sizeof(abi_ulong), q++) {
8397                 if (get_user_ual(addr, gp))
8398                     goto execve_efault;
8399                 if (!addr)
8400                     break;
8401                 if (!(*q = lock_user_string(addr)))
8402                     goto execve_efault;
8403             }
8404             *q = NULL;
8405 
8406             for (gp = guest_envp, q = envp; gp;
8407                   gp += sizeof(abi_ulong), q++) {
8408                 if (get_user_ual(addr, gp))
8409                     goto execve_efault;
8410                 if (!addr)
8411                     break;
8412                 if (!(*q = lock_user_string(addr)))
8413                     goto execve_efault;
8414             }
8415             *q = NULL;
8416 
8417             if (!(p = lock_user_string(arg1)))
8418                 goto execve_efault;
8419             /* Although execve() is not an interruptible syscall it is
8420              * a special case where we must use the safe_syscall wrapper:
8421              * if we allow a signal to happen before we make the host
8422              * syscall then we will 'lose' it, because at the point of
8423              * execve the process leaves QEMU's control. So we use the
8424              * safe syscall wrapper to ensure that we either take the
8425              * signal as a guest signal, or else it does not happen
8426              * before the execve completes and makes it the other
8427              * program's problem.
8428              */
8429             ret = get_errno(safe_execve(p, argp, envp));
8430             unlock_user(p, arg1, 0);
8431 
8432             goto execve_end;
8433 
8434         execve_efault:
8435             ret = -TARGET_EFAULT;
8436 
8437         execve_end:
8438             for (gp = guest_argp, q = argp; *q;
8439                   gp += sizeof(abi_ulong), q++) {
8440                 if (get_user_ual(addr, gp)
8441                     || !addr)
8442                     break;
8443                 unlock_user(*q, addr, 0);
8444             }
8445             for (gp = guest_envp, q = envp; *q;
8446                   gp += sizeof(abi_ulong), q++) {
8447                 if (get_user_ual(addr, gp)
8448                     || !addr)
8449                     break;
8450                 unlock_user(*q, addr, 0);
8451             }
8452 
8453             g_free(argp);
8454             g_free(envp);
8455         }
8456         return ret;
8457     case TARGET_NR_chdir:
8458         if (!(p = lock_user_string(arg1)))
8459             return -TARGET_EFAULT;
8460         ret = get_errno(chdir(p));
8461         unlock_user(p, arg1, 0);
8462         return ret;
8463 #ifdef TARGET_NR_time
8464     case TARGET_NR_time:
8465         {
8466             time_t host_time;
8467             ret = get_errno(time(&host_time));
8468             if (!is_error(ret)
8469                 && arg1
8470                 && put_user_sal(host_time, arg1))
8471                 return -TARGET_EFAULT;
8472         }
8473         return ret;
8474 #endif
8475 #ifdef TARGET_NR_mknod
8476     case TARGET_NR_mknod:
8477         if (!(p = lock_user_string(arg1)))
8478             return -TARGET_EFAULT;
8479         ret = get_errno(mknod(p, arg2, arg3));
8480         unlock_user(p, arg1, 0);
8481         return ret;
8482 #endif
8483 #if defined(TARGET_NR_mknodat)
8484     case TARGET_NR_mknodat:
8485         if (!(p = lock_user_string(arg2)))
8486             return -TARGET_EFAULT;
8487         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8488         unlock_user(p, arg2, 0);
8489         return ret;
8490 #endif
8491 #ifdef TARGET_NR_chmod
8492     case TARGET_NR_chmod:
8493         if (!(p = lock_user_string(arg1)))
8494             return -TARGET_EFAULT;
8495         ret = get_errno(chmod(p, arg2));
8496         unlock_user(p, arg1, 0);
8497         return ret;
8498 #endif
8499 #ifdef TARGET_NR_lseek
8500     case TARGET_NR_lseek:
8501         return get_errno(lseek(arg1, arg2, arg3));
8502 #endif
8503 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8504     /* Alpha specific */
8505     case TARGET_NR_getxpid:
8506         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8507         return get_errno(getpid());
8508 #endif
8509 #ifdef TARGET_NR_getpid
8510     case TARGET_NR_getpid:
8511         return get_errno(getpid());
8512 #endif
8513     case TARGET_NR_mount:
8514         {
8515             /* need to look at the data field */
8516             void *p2, *p3;
8517 
8518             if (arg1) {
8519                 p = lock_user_string(arg1);
8520                 if (!p) {
8521                     return -TARGET_EFAULT;
8522                 }
8523             } else {
8524                 p = NULL;
8525             }
8526 
8527             p2 = lock_user_string(arg2);
8528             if (!p2) {
8529                 if (arg1) {
8530                     unlock_user(p, arg1, 0);
8531                 }
8532                 return -TARGET_EFAULT;
8533             }
8534 
8535             if (arg3) {
8536                 p3 = lock_user_string(arg3);
8537                 if (!p3) {
8538                     if (arg1) {
8539                         unlock_user(p, arg1, 0);
8540                     }
8541                     unlock_user(p2, arg2, 0);
8542                     return -TARGET_EFAULT;
8543                 }
8544             } else {
8545                 p3 = NULL;
8546             }
8547 
8548             /* FIXME - arg5 should be locked, but it isn't clear how to
8549              * do that since it's not guaranteed to be a NULL-terminated
8550              * string.
8551              */
8552             if (!arg5) {
8553                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8554             } else {
8555                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8556             }
8557             ret = get_errno(ret);
8558 
8559             if (arg1) {
8560                 unlock_user(p, arg1, 0);
8561             }
8562             unlock_user(p2, arg2, 0);
8563             if (arg3) {
8564                 unlock_user(p3, arg3, 0);
8565             }
8566         }
8567         return ret;
8568 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8569 #if defined(TARGET_NR_umount)
8570     case TARGET_NR_umount:
8571 #endif
8572 #if defined(TARGET_NR_oldumount)
8573     case TARGET_NR_oldumount:
8574 #endif
8575         if (!(p = lock_user_string(arg1)))
8576             return -TARGET_EFAULT;
8577         ret = get_errno(umount(p));
8578         unlock_user(p, arg1, 0);
8579         return ret;
8580 #endif
8581 #ifdef TARGET_NR_stime /* not on alpha */
8582     case TARGET_NR_stime:
8583         {
8584             struct timespec ts;
8585             ts.tv_nsec = 0;
8586             if (get_user_sal(ts.tv_sec, arg1)) {
8587                 return -TARGET_EFAULT;
8588             }
8589             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8590         }
8591 #endif
8592 #ifdef TARGET_NR_alarm /* not on alpha */
8593     case TARGET_NR_alarm:
8594         return alarm(arg1);
8595 #endif
8596 #ifdef TARGET_NR_pause /* not on alpha */
8597     case TARGET_NR_pause:
8598         if (!block_signals()) {
8599             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8600         }
8601         return -TARGET_EINTR;
8602 #endif
8603 #ifdef TARGET_NR_utime
8604     case TARGET_NR_utime:
8605         {
8606             struct utimbuf tbuf, *host_tbuf;
8607             struct target_utimbuf *target_tbuf;
8608             if (arg2) {
8609                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8610                     return -TARGET_EFAULT;
8611                 tbuf.actime = tswapal(target_tbuf->actime);
8612                 tbuf.modtime = tswapal(target_tbuf->modtime);
8613                 unlock_user_struct(target_tbuf, arg2, 0);
8614                 host_tbuf = &tbuf;
8615             } else {
8616                 host_tbuf = NULL;
8617             }
8618             if (!(p = lock_user_string(arg1)))
8619                 return -TARGET_EFAULT;
8620             ret = get_errno(utime(p, host_tbuf));
8621             unlock_user(p, arg1, 0);
8622         }
8623         return ret;
8624 #endif
8625 #ifdef TARGET_NR_utimes
8626     case TARGET_NR_utimes:
8627         {
8628             struct timeval *tvp, tv[2];
8629             if (arg2) {
8630                 if (copy_from_user_timeval(&tv[0], arg2)
8631                     || copy_from_user_timeval(&tv[1],
8632                                               arg2 + sizeof(struct target_timeval)))
8633                     return -TARGET_EFAULT;
8634                 tvp = tv;
8635             } else {
8636                 tvp = NULL;
8637             }
8638             if (!(p = lock_user_string(arg1)))
8639                 return -TARGET_EFAULT;
8640             ret = get_errno(utimes(p, tvp));
8641             unlock_user(p, arg1, 0);
8642         }
8643         return ret;
8644 #endif
8645 #if defined(TARGET_NR_futimesat)
8646     case TARGET_NR_futimesat:
8647         {
8648             struct timeval *tvp, tv[2];
8649             if (arg3) {
8650                 if (copy_from_user_timeval(&tv[0], arg3)
8651                     || copy_from_user_timeval(&tv[1],
8652                                               arg3 + sizeof(struct target_timeval)))
8653                     return -TARGET_EFAULT;
8654                 tvp = tv;
8655             } else {
8656                 tvp = NULL;
8657             }
8658             if (!(p = lock_user_string(arg2))) {
8659                 return -TARGET_EFAULT;
8660             }
8661             ret = get_errno(futimesat(arg1, path(p), tvp));
8662             unlock_user(p, arg2, 0);
8663         }
8664         return ret;
8665 #endif
8666 #ifdef TARGET_NR_access
8667     case TARGET_NR_access:
8668         if (!(p = lock_user_string(arg1))) {
8669             return -TARGET_EFAULT;
8670         }
8671         ret = get_errno(access(path(p), arg2));
8672         unlock_user(p, arg1, 0);
8673         return ret;
8674 #endif
8675 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
8676     case TARGET_NR_faccessat:
8677         if (!(p = lock_user_string(arg2))) {
8678             return -TARGET_EFAULT;
8679         }
8680         ret = get_errno(faccessat(arg1, p, arg3, 0));
8681         unlock_user(p, arg2, 0);
8682         return ret;
8683 #endif
8684 #ifdef TARGET_NR_nice /* not on alpha */
8685     case TARGET_NR_nice:
8686         return get_errno(nice(arg1));
8687 #endif
8688     case TARGET_NR_sync:
8689         sync();
8690         return 0;
8691 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
8692     case TARGET_NR_syncfs:
8693         return get_errno(syncfs(arg1));
8694 #endif
8695     case TARGET_NR_kill:
8696         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
8697 #ifdef TARGET_NR_rename
8698     case TARGET_NR_rename:
8699         {
8700             void *p2;
8701             p = lock_user_string(arg1);
8702             p2 = lock_user_string(arg2);
8703             if (!p || !p2)
8704                 ret = -TARGET_EFAULT;
8705             else
8706                 ret = get_errno(rename(p, p2));
8707             unlock_user(p2, arg2, 0);
8708             unlock_user(p, arg1, 0);
8709         }
8710         return ret;
8711 #endif
8712 #if defined(TARGET_NR_renameat)
8713     case TARGET_NR_renameat:
8714         {
8715             void *p2;
8716             p  = lock_user_string(arg2);
8717             p2 = lock_user_string(arg4);
8718             if (!p || !p2)
8719                 ret = -TARGET_EFAULT;
8720             else
8721                 ret = get_errno(renameat(arg1, p, arg3, p2));
8722             unlock_user(p2, arg4, 0);
8723             unlock_user(p, arg2, 0);
8724         }
8725         return ret;
8726 #endif
8727 #if defined(TARGET_NR_renameat2)
8728     case TARGET_NR_renameat2:
8729         {
8730             void *p2;
8731             p  = lock_user_string(arg2);
8732             p2 = lock_user_string(arg4);
8733             if (!p || !p2) {
8734                 ret = -TARGET_EFAULT;
8735             } else {
8736                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
8737             }
8738             unlock_user(p2, arg4, 0);
8739             unlock_user(p, arg2, 0);
8740         }
8741         return ret;
8742 #endif
8743 #ifdef TARGET_NR_mkdir
8744     case TARGET_NR_mkdir:
8745         if (!(p = lock_user_string(arg1)))
8746             return -TARGET_EFAULT;
8747         ret = get_errno(mkdir(p, arg2));
8748         unlock_user(p, arg1, 0);
8749         return ret;
8750 #endif
8751 #if defined(TARGET_NR_mkdirat)
8752     case TARGET_NR_mkdirat:
8753         if (!(p = lock_user_string(arg2)))
8754             return -TARGET_EFAULT;
8755         ret = get_errno(mkdirat(arg1, p, arg3));
8756         unlock_user(p, arg2, 0);
8757         return ret;
8758 #endif
8759 #ifdef TARGET_NR_rmdir
8760     case TARGET_NR_rmdir:
8761         if (!(p = lock_user_string(arg1)))
8762             return -TARGET_EFAULT;
8763         ret = get_errno(rmdir(p));
8764         unlock_user(p, arg1, 0);
8765         return ret;
8766 #endif
8767     case TARGET_NR_dup:
8768         ret = get_errno(dup(arg1));
8769         if (ret >= 0) {
8770             fd_trans_dup(arg1, ret);
8771         }
8772         return ret;
8773 #ifdef TARGET_NR_pipe
8774     case TARGET_NR_pipe:
8775         return do_pipe(cpu_env, arg1, 0, 0);
8776 #endif
8777 #ifdef TARGET_NR_pipe2
8778     case TARGET_NR_pipe2:
8779         return do_pipe(cpu_env, arg1,
8780                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
8781 #endif
8782     case TARGET_NR_times:
8783         {
8784             struct target_tms *tmsp;
8785             struct tms tms;
8786             ret = get_errno(times(&tms));
8787             if (arg1) {
8788                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
8789                 if (!tmsp)
8790                     return -TARGET_EFAULT;
8791                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
8792                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
8793                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
8794                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
8795             }
8796             if (!is_error(ret))
8797                 ret = host_to_target_clock_t(ret);
8798         }
8799         return ret;
8800     case TARGET_NR_acct:
8801         if (arg1 == 0) {
8802             ret = get_errno(acct(NULL));
8803         } else {
8804             if (!(p = lock_user_string(arg1))) {
8805                 return -TARGET_EFAULT;
8806             }
8807             ret = get_errno(acct(path(p)));
8808             unlock_user(p, arg1, 0);
8809         }
8810         return ret;
8811 #ifdef TARGET_NR_umount2
8812     case TARGET_NR_umount2:
8813         if (!(p = lock_user_string(arg1)))
8814             return -TARGET_EFAULT;
8815         ret = get_errno(umount2(p, arg2));
8816         unlock_user(p, arg1, 0);
8817         return ret;
8818 #endif
8819     case TARGET_NR_ioctl:
8820         return do_ioctl(arg1, arg2, arg3);
8821 #ifdef TARGET_NR_fcntl
8822     case TARGET_NR_fcntl:
8823         return do_fcntl(arg1, arg2, arg3);
8824 #endif
8825     case TARGET_NR_setpgid:
8826         return get_errno(setpgid(arg1, arg2));
8827     case TARGET_NR_umask:
8828         return get_errno(umask(arg1));
8829     case TARGET_NR_chroot:
8830         if (!(p = lock_user_string(arg1)))
8831             return -TARGET_EFAULT;
8832         ret = get_errno(chroot(p));
8833         unlock_user(p, arg1, 0);
8834         return ret;
8835 #ifdef TARGET_NR_dup2
8836     case TARGET_NR_dup2:
8837         ret = get_errno(dup2(arg1, arg2));
8838         if (ret >= 0) {
8839             fd_trans_dup(arg1, arg2);
8840         }
8841         return ret;
8842 #endif
8843 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
8844     case TARGET_NR_dup3:
8845     {
8846         int host_flags;
8847 
8848         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
8849             return -TARGET_EINVAL;
8850         }
8851         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
8852         ret = get_errno(dup3(arg1, arg2, host_flags));
8853         if (ret >= 0) {
8854             fd_trans_dup(arg1, arg2);
8855         }
8856         return ret;
8857     }
8858 #endif
8859 #ifdef TARGET_NR_getppid /* not on alpha */
8860     case TARGET_NR_getppid:
8861         return get_errno(getppid());
8862 #endif
8863 #ifdef TARGET_NR_getpgrp
8864     case TARGET_NR_getpgrp:
8865         return get_errno(getpgrp());
8866 #endif
8867     case TARGET_NR_setsid:
8868         return get_errno(setsid());
8869 #ifdef TARGET_NR_sigaction
8870     case TARGET_NR_sigaction:
8871         {
8872 #if defined(TARGET_MIPS)
8873             struct target_sigaction act, oact, *pact, *old_act;
8874 
8875             if (arg2) {
8876                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8877                     return -TARGET_EFAULT;
8878                 act._sa_handler = old_act->_sa_handler;
8879                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
8880                 act.sa_flags = old_act->sa_flags;
8881                 unlock_user_struct(old_act, arg2, 0);
8882                 pact = &act;
8883             } else {
8884                 pact = NULL;
8885             }
8886 
8887             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8888 
8889             if (!is_error(ret) && arg3) {
8890                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8891                     return -TARGET_EFAULT;
8892                 old_act->_sa_handler = oact._sa_handler;
8893                 old_act->sa_flags = oact.sa_flags;
8894                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
8895                 old_act->sa_mask.sig[1] = 0;
8896                 old_act->sa_mask.sig[2] = 0;
8897                 old_act->sa_mask.sig[3] = 0;
8898                 unlock_user_struct(old_act, arg3, 1);
8899             }
8900 #else
8901             struct target_old_sigaction *old_act;
8902             struct target_sigaction act, oact, *pact;
8903             if (arg2) {
8904                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
8905                     return -TARGET_EFAULT;
8906                 act._sa_handler = old_act->_sa_handler;
8907                 target_siginitset(&act.sa_mask, old_act->sa_mask);
8908                 act.sa_flags = old_act->sa_flags;
8909 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8910                 act.sa_restorer = old_act->sa_restorer;
8911 #endif
8912                 unlock_user_struct(old_act, arg2, 0);
8913                 pact = &act;
8914             } else {
8915                 pact = NULL;
8916             }
8917             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
8918             if (!is_error(ret) && arg3) {
8919                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
8920                     return -TARGET_EFAULT;
8921                 old_act->_sa_handler = oact._sa_handler;
8922                 old_act->sa_mask = oact.sa_mask.sig[0];
8923                 old_act->sa_flags = oact.sa_flags;
8924 #ifdef TARGET_ARCH_HAS_SA_RESTORER
8925                 old_act->sa_restorer = oact.sa_restorer;
8926 #endif
8927                 unlock_user_struct(old_act, arg3, 1);
8928             }
8929 #endif
8930         }
8931         return ret;
8932 #endif
8933     case TARGET_NR_rt_sigaction:
8934         {
8935             /*
8936              * For Alpha and SPARC this is a 5 argument syscall, with
8937              * a 'restorer' parameter which must be copied into the
8938              * sa_restorer field of the sigaction struct.
8939              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
8940              * and arg5 is the sigsetsize.
8941              */
8942 #if defined(TARGET_ALPHA)
8943             target_ulong sigsetsize = arg4;
8944             target_ulong restorer = arg5;
8945 #elif defined(TARGET_SPARC)
8946             target_ulong restorer = arg4;
8947             target_ulong sigsetsize = arg5;
8948 #else
8949             target_ulong sigsetsize = arg4;
8950             target_ulong restorer = 0;
8951 #endif
8952             struct target_sigaction *act = NULL;
8953             struct target_sigaction *oact = NULL;
8954 
8955             if (sigsetsize != sizeof(target_sigset_t)) {
8956                 return -TARGET_EINVAL;
8957             }
8958             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
8959                 return -TARGET_EFAULT;
8960             }
8961             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
8962                 ret = -TARGET_EFAULT;
8963             } else {
8964                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
8965                 if (oact) {
8966                     unlock_user_struct(oact, arg3, 1);
8967                 }
8968             }
8969             if (act) {
8970                 unlock_user_struct(act, arg2, 0);
8971             }
8972         }
8973         return ret;
8974 #ifdef TARGET_NR_sgetmask /* not on alpha */
8975     case TARGET_NR_sgetmask:
8976         {
8977             sigset_t cur_set;
8978             abi_ulong target_set;
8979             ret = do_sigprocmask(0, NULL, &cur_set);
8980             if (!ret) {
8981                 host_to_target_old_sigset(&target_set, &cur_set);
8982                 ret = target_set;
8983             }
8984         }
8985         return ret;
8986 #endif
8987 #ifdef TARGET_NR_ssetmask /* not on alpha */
8988     case TARGET_NR_ssetmask:
8989         {
8990             sigset_t set, oset;
8991             abi_ulong target_set = arg1;
8992             target_to_host_old_sigset(&set, &target_set);
8993             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
8994             if (!ret) {
8995                 host_to_target_old_sigset(&target_set, &oset);
8996                 ret = target_set;
8997             }
8998         }
8999         return ret;
9000 #endif
9001 #ifdef TARGET_NR_sigprocmask
9002     case TARGET_NR_sigprocmask:
9003         {
9004 #if defined(TARGET_ALPHA)
9005             sigset_t set, oldset;
9006             abi_ulong mask;
9007             int how;
9008 
9009             switch (arg1) {
9010             case TARGET_SIG_BLOCK:
9011                 how = SIG_BLOCK;
9012                 break;
9013             case TARGET_SIG_UNBLOCK:
9014                 how = SIG_UNBLOCK;
9015                 break;
9016             case TARGET_SIG_SETMASK:
9017                 how = SIG_SETMASK;
9018                 break;
9019             default:
9020                 return -TARGET_EINVAL;
9021             }
9022             mask = arg2;
9023             target_to_host_old_sigset(&set, &mask);
9024 
9025             ret = do_sigprocmask(how, &set, &oldset);
9026             if (!is_error(ret)) {
9027                 host_to_target_old_sigset(&mask, &oldset);
9028                 ret = mask;
9029                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9030             }
9031 #else
9032             sigset_t set, oldset, *set_ptr;
9033             int how;
9034 
9035             if (arg2) {
9036                 switch (arg1) {
9037                 case TARGET_SIG_BLOCK:
9038                     how = SIG_BLOCK;
9039                     break;
9040                 case TARGET_SIG_UNBLOCK:
9041                     how = SIG_UNBLOCK;
9042                     break;
9043                 case TARGET_SIG_SETMASK:
9044                     how = SIG_SETMASK;
9045                     break;
9046                 default:
9047                     return -TARGET_EINVAL;
9048                 }
9049                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9050                     return -TARGET_EFAULT;
9051                 target_to_host_old_sigset(&set, p);
9052                 unlock_user(p, arg2, 0);
9053                 set_ptr = &set;
9054             } else {
9055                 how = 0;
9056                 set_ptr = NULL;
9057             }
9058             ret = do_sigprocmask(how, set_ptr, &oldset);
9059             if (!is_error(ret) && arg3) {
9060                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9061                     return -TARGET_EFAULT;
9062                 host_to_target_old_sigset(p, &oldset);
9063                 unlock_user(p, arg3, sizeof(target_sigset_t));
9064             }
9065 #endif
9066         }
9067         return ret;
9068 #endif
9069     case TARGET_NR_rt_sigprocmask:
9070         {
9071             int how = arg1;
9072             sigset_t set, oldset, *set_ptr;
9073 
9074             if (arg4 != sizeof(target_sigset_t)) {
9075                 return -TARGET_EINVAL;
9076             }
9077 
9078             if (arg2) {
9079                 switch(how) {
9080                 case TARGET_SIG_BLOCK:
9081                     how = SIG_BLOCK;
9082                     break;
9083                 case TARGET_SIG_UNBLOCK:
9084                     how = SIG_UNBLOCK;
9085                     break;
9086                 case TARGET_SIG_SETMASK:
9087                     how = SIG_SETMASK;
9088                     break;
9089                 default:
9090                     return -TARGET_EINVAL;
9091                 }
9092                 if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
9093                     return -TARGET_EFAULT;
9094                 target_to_host_sigset(&set, p);
9095                 unlock_user(p, arg2, 0);
9096                 set_ptr = &set;
9097             } else {
9098                 how = 0;
9099                 set_ptr = NULL;
9100             }
9101             ret = do_sigprocmask(how, set_ptr, &oldset);
9102             if (!is_error(ret) && arg3) {
9103                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9104                     return -TARGET_EFAULT;
9105                 host_to_target_sigset(p, &oldset);
9106                 unlock_user(p, arg3, sizeof(target_sigset_t));
9107             }
9108         }
9109         return ret;
9110 #ifdef TARGET_NR_sigpending
9111     case TARGET_NR_sigpending:
9112         {
9113             sigset_t set;
9114             ret = get_errno(sigpending(&set));
9115             if (!is_error(ret)) {
9116                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9117                     return -TARGET_EFAULT;
9118                 host_to_target_old_sigset(p, &set);
9119                 unlock_user(p, arg1, sizeof(target_sigset_t));
9120             }
9121         }
9122         return ret;
9123 #endif
9124     case TARGET_NR_rt_sigpending:
9125         {
9126             sigset_t set;
9127 
9128             /* Yes, this check is >, not != as in most other places.  We
9129              * follow the kernel's logic here: it implements NR_sigpending
9130              * through the same code path, and in that case the
9131              * old_sigset_t is smaller in size.
9132              */
9133             if (arg2 > sizeof(target_sigset_t)) {
9134                 return -TARGET_EINVAL;
9135             }
9136 
9137             ret = get_errno(sigpending(&set));
9138             if (!is_error(ret)) {
9139                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9140                     return -TARGET_EFAULT;
9141                 host_to_target_sigset(p, &set);
9142                 unlock_user(p, arg1, sizeof(target_sigset_t));
9143             }
9144         }
9145         return ret;
9146 #ifdef TARGET_NR_sigsuspend
9147     case TARGET_NR_sigsuspend:
9148         {
9149             TaskState *ts = cpu->opaque;
9150 #if defined(TARGET_ALPHA)
9151             abi_ulong mask = arg1;
9152             target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
9153 #else
9154             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9155                 return -TARGET_EFAULT;
9156             target_to_host_old_sigset(&ts->sigsuspend_mask, p);
9157             unlock_user(p, arg1, 0);
9158 #endif
9159             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9160                                                SIGSET_T_SIZE));
9161             if (ret != -TARGET_ERESTARTSYS) {
9162                 ts->in_sigsuspend = 1;
9163             }
9164         }
9165         return ret;
9166 #endif
9167     case TARGET_NR_rt_sigsuspend:
9168         {
9169             TaskState *ts = cpu->opaque;
9170 
9171             if (arg2 != sizeof(target_sigset_t)) {
9172                 return -TARGET_EINVAL;
9173             }
9174             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9175                 return -TARGET_EFAULT;
9176             target_to_host_sigset(&ts->sigsuspend_mask, p);
9177             unlock_user(p, arg1, 0);
9178             ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
9179                                                SIGSET_T_SIZE));
9180             if (ret != -TARGET_ERESTARTSYS) {
9181                 ts->in_sigsuspend = 1;
9182             }
9183         }
9184         return ret;
9185 #ifdef TARGET_NR_rt_sigtimedwait
9186     case TARGET_NR_rt_sigtimedwait:
9187         {
9188             sigset_t set;
9189             struct timespec uts, *puts;
9190             siginfo_t uinfo;
9191 
9192             if (arg4 != sizeof(target_sigset_t)) {
9193                 return -TARGET_EINVAL;
9194             }
9195 
9196             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9197                 return -TARGET_EFAULT;
9198             target_to_host_sigset(&set, p);
9199             unlock_user(p, arg1, 0);
9200             if (arg3) {
9201                 puts = &uts;
9202                 if (target_to_host_timespec(puts, arg3)) {
9203                     return -TARGET_EFAULT;
9204                 }
9205             } else {
9206                 puts = NULL;
9207             }
9208             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9209                                                  SIGSET_T_SIZE));
9210             if (!is_error(ret)) {
9211                 if (arg2) {
9212                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9213                                   0);
9214                     if (!p) {
9215                         return -TARGET_EFAULT;
9216                     }
9217                     host_to_target_siginfo(p, &uinfo);
9218                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9219                 }
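                      /* On success the raw value is a host signal number; convert
                       * it to the target's signal numbering before returning. */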
9220                 ret = host_to_target_signal(ret);
9221             }
9222         }
9223         return ret;
9224 #endif
9225 #ifdef TARGET_NR_rt_sigtimedwait_time64
9226     case TARGET_NR_rt_sigtimedwait_time64:
9227         {
9228             sigset_t set;
9229             struct timespec uts, *puts;
9230             siginfo_t uinfo;
9231 
9232             if (arg4 != sizeof(target_sigset_t)) {
9233                 return -TARGET_EINVAL;
9234             }
9235 
9236             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9237             if (!p) {
9238                 return -TARGET_EFAULT;
9239             }
9240             target_to_host_sigset(&set, p);
9241             unlock_user(p, arg1, 0);
9242             if (arg3) {
9243                 puts = &uts;
9244                 if (target_to_host_timespec64(puts, arg3)) {
9245                     return -TARGET_EFAULT;
9246                 }
9247             } else {
9248                 puts = NULL;
9249             }
9250             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9251                                                  SIGSET_T_SIZE));
9252             if (!is_error(ret)) {
9253                 if (arg2) {
9254                     p = lock_user(VERIFY_WRITE, arg2,
9255                                   sizeof(target_siginfo_t), 0);
9256                     if (!p) {
9257                         return -TARGET_EFAULT;
9258                     }
9259                     host_to_target_siginfo(p, &uinfo);
9260                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9261                 }
9262                 ret = host_to_target_signal(ret);
9263             }
9264         }
9265         return ret;
9266 #endif
9267     case TARGET_NR_rt_sigqueueinfo:
9268         {
9269             siginfo_t uinfo;
9270 
9271             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9272             if (!p) {
9273                 return -TARGET_EFAULT;
9274             }
9275             target_to_host_siginfo(&uinfo, p);
9276             unlock_user(p, arg3, 0);
9277             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9278         }
9279         return ret;
9280     case TARGET_NR_rt_tgsigqueueinfo:
9281         {
9282             siginfo_t uinfo;
9283 
9284             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9285             if (!p) {
9286                 return -TARGET_EFAULT;
9287             }
9288             target_to_host_siginfo(&uinfo, p);
9289             unlock_user(p, arg4, 0);
9290             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9291         }
9292         return ret;
9293 #ifdef TARGET_NR_sigreturn
9294     case TARGET_NR_sigreturn:
9295         if (block_signals()) {
9296             return -TARGET_ERESTARTSYS;
9297         }
9298         return do_sigreturn(cpu_env);
9299 #endif
9300     case TARGET_NR_rt_sigreturn:
9301         if (block_signals()) {
9302             return -TARGET_ERESTARTSYS;
9303         }
9304         return do_rt_sigreturn(cpu_env);
9305     case TARGET_NR_sethostname:
9306         if (!(p = lock_user_string(arg1)))
9307             return -TARGET_EFAULT;
9308         ret = get_errno(sethostname(p, arg2));
9309         unlock_user(p, arg1, 0);
9310         return ret;
9311 #ifdef TARGET_NR_setrlimit
9312     case TARGET_NR_setrlimit:
9313         {
9314             int resource = target_to_host_resource(arg1);
9315             struct target_rlimit *target_rlim;
9316             struct rlimit rlim;
9317             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9318                 return -TARGET_EFAULT;
9319             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9320             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9321             unlock_user_struct(target_rlim, arg2, 0);
9322             /*
9323              * If we just passed through resource limit settings for memory then
9324              * they would also apply to QEMU's own allocations, and QEMU will
9325              * crash or hang if its allocations fail. Ideally we would
9326              * track the guest allocations in QEMU and apply the limits ourselves.
9327              * For now, just tell the guest the call succeeded but don't actually
9328              * limit anything.
9329              */
9330             if (resource != RLIMIT_AS &&
9331                 resource != RLIMIT_DATA &&
9332                 resource != RLIMIT_STACK) {
9333                 return get_errno(setrlimit(resource, &rlim));
9334             } else {
9335                 return 0;
9336             }
9337         }
9338 #endif
9339 #ifdef TARGET_NR_getrlimit
9340     case TARGET_NR_getrlimit:
9341         {
9342             int resource = target_to_host_resource(arg1);
9343             struct target_rlimit *target_rlim;
9344             struct rlimit rlim;
9345 
9346             ret = get_errno(getrlimit(resource, &rlim));
9347             if (!is_error(ret)) {
9348                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9349                     return -TARGET_EFAULT;
9350                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9351                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9352                 unlock_user_struct(target_rlim, arg2, 1);
9353             }
9354         }
9355         return ret;
9356 #endif
9357     case TARGET_NR_getrusage:
9358         {
9359             struct rusage rusage;
9360             ret = get_errno(getrusage(arg1, &rusage));
9361             if (!is_error(ret)) {
9362                 ret = host_to_target_rusage(arg2, &rusage);
9363             }
9364         }
9365         return ret;
9366 #if defined(TARGET_NR_gettimeofday)
9367     case TARGET_NR_gettimeofday:
9368         {
9369             struct timeval tv;
9370             struct timezone tz;
9371 
9372             ret = get_errno(gettimeofday(&tv, &tz));
9373             if (!is_error(ret)) {
9374                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9375                     return -TARGET_EFAULT;
9376                 }
9377                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9378                     return -TARGET_EFAULT;
9379                 }
9380             }
9381         }
9382         return ret;
9383 #endif
9384 #if defined(TARGET_NR_settimeofday)
9385     case TARGET_NR_settimeofday:
9386         {
9387             struct timeval tv, *ptv = NULL;
9388             struct timezone tz, *ptz = NULL;
9389 
9390             if (arg1) {
9391                 if (copy_from_user_timeval(&tv, arg1)) {
9392                     return -TARGET_EFAULT;
9393                 }
9394                 ptv = &tv;
9395             }
9396 
9397             if (arg2) {
9398                 if (copy_from_user_timezone(&tz, arg2)) {
9399                     return -TARGET_EFAULT;
9400                 }
9401                 ptz = &tz;
9402             }
9403 
9404             return get_errno(settimeofday(ptv, ptz));
9405         }
9406 #endif
9407 #if defined(TARGET_NR_select)
9408     case TARGET_NR_select:
9409 #if defined(TARGET_WANT_NI_OLD_SELECT)
9410         /* Some architectures used to have an old_select entry here
9411          * but now return ENOSYS for it.
9412          */
9413         ret = -TARGET_ENOSYS;
9414 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9415         ret = do_old_select(arg1);
9416 #else
9417         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9418 #endif
9419         return ret;
9420 #endif
9421 #ifdef TARGET_NR_pselect6
9422     case TARGET_NR_pselect6:
9423         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9424 #endif
9425 #ifdef TARGET_NR_pselect6_time64
9426     case TARGET_NR_pselect6_time64:
9427         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9428 #endif
9429 #ifdef TARGET_NR_symlink
9430     case TARGET_NR_symlink:
9431         {
9432             void *p2;
9433             p = lock_user_string(arg1);
9434             p2 = lock_user_string(arg2);
9435             if (!p || !p2)
9436                 ret = -TARGET_EFAULT;
9437             else
9438                 ret = get_errno(symlink(p, p2));
9439             unlock_user(p2, arg2, 0);
9440             unlock_user(p, arg1, 0);
9441         }
9442         return ret;
9443 #endif
9444 #if defined(TARGET_NR_symlinkat)
9445     case TARGET_NR_symlinkat:
9446         {
9447             void *p2;
9448             p  = lock_user_string(arg1);
9449             p2 = lock_user_string(arg3);
9450             if (!p || !p2)
9451                 ret = -TARGET_EFAULT;
9452             else
9453                 ret = get_errno(symlinkat(p, arg2, p2));
9454             unlock_user(p2, arg3, 0);
9455             unlock_user(p, arg1, 0);
9456         }
9457         return ret;
9458 #endif
9459 #ifdef TARGET_NR_readlink
9460     case TARGET_NR_readlink:
9461         {
9462             void *p2;
9463             p = lock_user_string(arg1);
9464             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9465             if (!p || !p2) {
9466                 ret = -TARGET_EFAULT;
9467             } else if (!arg3) {
9468                 /* Short circuit this for the magic exe check. */
9469                 ret = -TARGET_EINVAL;
9470             } else if (is_proc_myself((const char *)p, "exe")) {
9471                 char real[PATH_MAX], *temp;
9472                 temp = realpath(exec_path, real);
9473                 /* Return value is # of bytes that we wrote to the buffer. */
9474                 if (temp == NULL) {
9475                     ret = get_errno(-1);
9476                 } else {
9477                     /* Don't worry about sign mismatch as earlier mapping
9478                      * logic would have thrown a bad address error. */
9479                     ret = MIN(strlen(real), arg3);
9480                     /* We cannot NUL terminate the string. */
9481                     memcpy(p2, real, ret);
9482                 }
9483             } else {
9484                 ret = get_errno(readlink(path(p), p2, arg3));
9485             }
9486             unlock_user(p2, arg2, ret);
9487             unlock_user(p, arg1, 0);
9488         }
9489         return ret;
9490 #endif
9491 #if defined(TARGET_NR_readlinkat)
9492     case TARGET_NR_readlinkat:
9493         {
9494             void *p2;
9495             p  = lock_user_string(arg2);
9496             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9497             if (!p || !p2) {
9498                 ret = -TARGET_EFAULT;
9499             } else if (is_proc_myself((const char *)p, "exe")) {
9500                 char real[PATH_MAX], *temp;
9501                 temp = realpath(exec_path, real);
9502             ret = temp == NULL ? get_errno(-1) : strlen(real);
9503                 snprintf((char *)p2, arg4, "%s", real);
9504             } else {
9505                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9506             }
9507             unlock_user(p2, arg3, ret);
9508             unlock_user(p, arg2, 0);
9509         }
9510         return ret;
9511 #endif
9512 #ifdef TARGET_NR_swapon
9513     case TARGET_NR_swapon:
9514         if (!(p = lock_user_string(arg1)))
9515             return -TARGET_EFAULT;
9516         ret = get_errno(swapon(p, arg2));
9517         unlock_user(p, arg1, 0);
9518         return ret;
9519 #endif
9520     case TARGET_NR_reboot:
9521         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9522            /* arg4 must be ignored in all other cases */
9523            p = lock_user_string(arg4);
9524            if (!p) {
9525                return -TARGET_EFAULT;
9526            }
9527            ret = get_errno(reboot(arg1, arg2, arg3, p));
9528            unlock_user(p, arg4, 0);
9529         } else {
9530            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9531         }
9532         return ret;
9533 #ifdef TARGET_NR_mmap
9534     case TARGET_NR_mmap:
9535 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9536     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9537     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9538     || defined(TARGET_S390X)
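              /*
               * These targets use the old mmap calling convention: the six
               * arguments are passed as a block in guest memory pointed to
               * by arg1 rather than in registers.
               */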
9539         {
9540             abi_ulong *v;
9541             abi_ulong v1, v2, v3, v4, v5, v6;
9542             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9543                 return -TARGET_EFAULT;
9544             v1 = tswapal(v[0]);
9545             v2 = tswapal(v[1]);
9546             v3 = tswapal(v[2]);
9547             v4 = tswapal(v[3]);
9548             v5 = tswapal(v[4]);
9549             v6 = tswapal(v[5]);
9550             unlock_user(v, arg1, 0);
9551             ret = get_errno(target_mmap(v1, v2, v3,
9552                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9553                                         v5, v6));
9554         }
9555 #else
9556         /* mmap pointers are always untagged */
9557         ret = get_errno(target_mmap(arg1, arg2, arg3,
9558                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9559                                     arg5,
9560                                     arg6));
9561 #endif
9562         return ret;
9563 #endif
9564 #ifdef TARGET_NR_mmap2
9565     case TARGET_NR_mmap2:
9566 #ifndef MMAP_SHIFT
9567 #define MMAP_SHIFT 12
9568 #endif
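              /*
               * For mmap2 the file offset (arg6) is given in units of
               * (1 << MMAP_SHIFT)-byte pages (normally 4096 bytes), so
               * convert it back to a byte offset for target_mmap().
               */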
9569         ret = target_mmap(arg1, arg2, arg3,
9570                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9571                           arg5, arg6 << MMAP_SHIFT);
9572         return get_errno(ret);
9573 #endif
9574     case TARGET_NR_munmap:
9575         arg1 = cpu_untagged_addr(cpu, arg1);
9576         return get_errno(target_munmap(arg1, arg2));
9577     case TARGET_NR_mprotect:
9578         arg1 = cpu_untagged_addr(cpu, arg1);
9579         {
9580             TaskState *ts = cpu->opaque;
9581             /* Special hack to detect libc making the stack executable.  */
9582             if ((arg3 & PROT_GROWSDOWN)
9583                 && arg1 >= ts->info->stack_limit
9584                 && arg1 <= ts->info->start_stack) {
9585                 arg3 &= ~PROT_GROWSDOWN;
9586                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9587                 arg1 = ts->info->stack_limit;
9588             }
9589         }
9590         return get_errno(target_mprotect(arg1, arg2, arg3));
9591 #ifdef TARGET_NR_mremap
9592     case TARGET_NR_mremap:
9593         arg1 = cpu_untagged_addr(cpu, arg1);
9594         /* mremap new_addr (arg5) is always untagged */
9595         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9596 #endif
9597         /* ??? msync/mlock/munlock are broken for softmmu.  */
9598 #ifdef TARGET_NR_msync
9599     case TARGET_NR_msync:
9600         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9601 #endif
9602 #ifdef TARGET_NR_mlock
9603     case TARGET_NR_mlock:
9604         return get_errno(mlock(g2h(cpu, arg1), arg2));
9605 #endif
9606 #ifdef TARGET_NR_munlock
9607     case TARGET_NR_munlock:
9608         return get_errno(munlock(g2h(cpu, arg1), arg2));
9609 #endif
9610 #ifdef TARGET_NR_mlockall
9611     case TARGET_NR_mlockall:
9612         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
9613 #endif
9614 #ifdef TARGET_NR_munlockall
9615     case TARGET_NR_munlockall:
9616         return get_errno(munlockall());
9617 #endif
9618 #ifdef TARGET_NR_truncate
9619     case TARGET_NR_truncate:
9620         if (!(p = lock_user_string(arg1)))
9621             return -TARGET_EFAULT;
9622         ret = get_errno(truncate(p, arg2));
9623         unlock_user(p, arg1, 0);
9624         return ret;
9625 #endif
9626 #ifdef TARGET_NR_ftruncate
9627     case TARGET_NR_ftruncate:
9628         return get_errno(ftruncate(arg1, arg2));
9629 #endif
9630     case TARGET_NR_fchmod:
9631         return get_errno(fchmod(arg1, arg2));
9632 #if defined(TARGET_NR_fchmodat)
9633     case TARGET_NR_fchmodat:
9634         if (!(p = lock_user_string(arg2)))
9635             return -TARGET_EFAULT;
9636         ret = get_errno(fchmodat(arg1, p, arg3, 0));
9637         unlock_user(p, arg2, 0);
9638         return ret;
9639 #endif
9640     case TARGET_NR_getpriority:
9641         /* Note that negative values are valid for getpriority, so we must
9642            differentiate based on errno settings.  */
9643         errno = 0;
9644         ret = getpriority(arg1, arg2);
9645         if (ret == -1 && errno != 0) {
9646             return -host_to_target_errno(errno);
9647         }
9648 #ifdef TARGET_ALPHA
9649         /* Return value is the unbiased priority.  Signal no error.  */
9650         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
9651 #else
9652         /* Return value is a biased priority to avoid negative numbers.  */
9653         ret = 20 - ret;
9654 #endif
9655         return ret;
9656     case TARGET_NR_setpriority:
9657         return get_errno(setpriority(arg1, arg2, arg3));
9658 #ifdef TARGET_NR_statfs
9659     case TARGET_NR_statfs:
9660         if (!(p = lock_user_string(arg1))) {
9661             return -TARGET_EFAULT;
9662         }
9663         ret = get_errno(statfs(path(p), &stfs));
9664         unlock_user(p, arg1, 0);
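              /* TARGET_NR_fstatfs jumps here to share the conversion code below. */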
9665     convert_statfs:
9666         if (!is_error(ret)) {
9667             struct target_statfs *target_stfs;
9668 
9669             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
9670                 return -TARGET_EFAULT;
9671             __put_user(stfs.f_type, &target_stfs->f_type);
9672             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9673             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9674             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9675             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9676             __put_user(stfs.f_files, &target_stfs->f_files);
9677             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9678             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9679             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9680             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9681             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9682 #ifdef _STATFS_F_FLAGS
9683             __put_user(stfs.f_flags, &target_stfs->f_flags);
9684 #else
9685             __put_user(0, &target_stfs->f_flags);
9686 #endif
9687             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9688             unlock_user_struct(target_stfs, arg2, 1);
9689         }
9690         return ret;
9691 #endif
9692 #ifdef TARGET_NR_fstatfs
9693     case TARGET_NR_fstatfs:
9694         ret = get_errno(fstatfs(arg1, &stfs));
9695         goto convert_statfs;
9696 #endif
9697 #ifdef TARGET_NR_statfs64
9698     case TARGET_NR_statfs64:
9699         if (!(p = lock_user_string(arg1))) {
9700             return -TARGET_EFAULT;
9701         }
9702         ret = get_errno(statfs(path(p), &stfs));
9703         unlock_user(p, arg1, 0);
9704     convert_statfs64:
9705         if (!is_error(ret)) {
9706             struct target_statfs64 *target_stfs;
9707 
9708             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
9709                 return -TARGET_EFAULT;
9710             __put_user(stfs.f_type, &target_stfs->f_type);
9711             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
9712             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
9713             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
9714             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
9715             __put_user(stfs.f_files, &target_stfs->f_files);
9716             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
9717             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
9718             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
9719             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
9720             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
9721 #ifdef _STATFS_F_FLAGS
9722             __put_user(stfs.f_flags, &target_stfs->f_flags);
9723 #else
9724             __put_user(0, &target_stfs->f_flags);
9725 #endif
9726             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
9727             unlock_user_struct(target_stfs, arg3, 1);
9728         }
9729         return ret;
9730     case TARGET_NR_fstatfs64:
9731         ret = get_errno(fstatfs(arg1, &stfs));
9732         goto convert_statfs64;
9733 #endif
9734 #ifdef TARGET_NR_socketcall
9735     case TARGET_NR_socketcall:
9736         return do_socketcall(arg1, arg2);
9737 #endif
9738 #ifdef TARGET_NR_accept
9739     case TARGET_NR_accept:
9740         return do_accept4(arg1, arg2, arg3, 0);
9741 #endif
9742 #ifdef TARGET_NR_accept4
9743     case TARGET_NR_accept4:
9744         return do_accept4(arg1, arg2, arg3, arg4);
9745 #endif
9746 #ifdef TARGET_NR_bind
9747     case TARGET_NR_bind:
9748         return do_bind(arg1, arg2, arg3);
9749 #endif
9750 #ifdef TARGET_NR_connect
9751     case TARGET_NR_connect:
9752         return do_connect(arg1, arg2, arg3);
9753 #endif
9754 #ifdef TARGET_NR_getpeername
9755     case TARGET_NR_getpeername:
9756         return do_getpeername(arg1, arg2, arg3);
9757 #endif
9758 #ifdef TARGET_NR_getsockname
9759     case TARGET_NR_getsockname:
9760         return do_getsockname(arg1, arg2, arg3);
9761 #endif
9762 #ifdef TARGET_NR_getsockopt
9763     case TARGET_NR_getsockopt:
9764         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
9765 #endif
9766 #ifdef TARGET_NR_listen
9767     case TARGET_NR_listen:
9768         return get_errno(listen(arg1, arg2));
9769 #endif
9770 #ifdef TARGET_NR_recv
9771     case TARGET_NR_recv:
9772         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
9773 #endif
9774 #ifdef TARGET_NR_recvfrom
9775     case TARGET_NR_recvfrom:
9776         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
9777 #endif
9778 #ifdef TARGET_NR_recvmsg
9779     case TARGET_NR_recvmsg:
9780         return do_sendrecvmsg(arg1, arg2, arg3, 0);
9781 #endif
9782 #ifdef TARGET_NR_send
9783     case TARGET_NR_send:
9784         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
9785 #endif
9786 #ifdef TARGET_NR_sendmsg
9787     case TARGET_NR_sendmsg:
9788         return do_sendrecvmsg(arg1, arg2, arg3, 1);
9789 #endif
9790 #ifdef TARGET_NR_sendmmsg
9791     case TARGET_NR_sendmmsg:
9792         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
9793 #endif
9794 #ifdef TARGET_NR_recvmmsg
9795     case TARGET_NR_recvmmsg:
9796         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
9797 #endif
9798 #ifdef TARGET_NR_sendto
9799     case TARGET_NR_sendto:
9800         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
9801 #endif
9802 #ifdef TARGET_NR_shutdown
9803     case TARGET_NR_shutdown:
9804         return get_errno(shutdown(arg1, arg2));
9805 #endif
9806 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
9807     case TARGET_NR_getrandom:
9808         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
9809         if (!p) {
9810             return -TARGET_EFAULT;
9811         }
9812         ret = get_errno(getrandom(p, arg2, arg3));
9813         unlock_user(p, arg1, ret);
9814         return ret;
9815 #endif
9816 #ifdef TARGET_NR_socket
9817     case TARGET_NR_socket:
9818         return do_socket(arg1, arg2, arg3);
9819 #endif
9820 #ifdef TARGET_NR_socketpair
9821     case TARGET_NR_socketpair:
9822         return do_socketpair(arg1, arg2, arg3, arg4);
9823 #endif
9824 #ifdef TARGET_NR_setsockopt
9825     case TARGET_NR_setsockopt:
9826         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
9827 #endif
9828 #if defined(TARGET_NR_syslog)
9829     case TARGET_NR_syslog:
9830         {
9831             int len = arg2;
9832 
9833             switch (arg1) {
9834             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
9835             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
9836             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
9837             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
9838             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
9839             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
9840             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
9841             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
9842                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
9843             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
9844             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
9845             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
9846                 {
9847                     if (len < 0) {
9848                         return -TARGET_EINVAL;
9849                     }
9850                     if (len == 0) {
9851                         return 0;
9852                     }
9853                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9854                     if (!p) {
9855                         return -TARGET_EFAULT;
9856                     }
9857                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
9858                     unlock_user(p, arg2, arg3);
9859                 }
9860                 return ret;
9861             default:
9862                 return -TARGET_EINVAL;
9863             }
9864         }
9865         break;
9866 #endif
9867     case TARGET_NR_setitimer:
9868         {
9869             struct itimerval value, ovalue, *pvalue;
9870 
9871             if (arg2) {
9872                 pvalue = &value;
9873                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
9874                     || copy_from_user_timeval(&pvalue->it_value,
9875                                               arg2 + sizeof(struct target_timeval)))
9876                     return -TARGET_EFAULT;
9877             } else {
9878                 pvalue = NULL;
9879             }
9880             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
9881             if (!is_error(ret) && arg3) {
9882                 if (copy_to_user_timeval(arg3,
9883                                          &ovalue.it_interval)
9884                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
9885                                             &ovalue.it_value))
9886                     return -TARGET_EFAULT;
9887             }
9888         }
9889         return ret;
9890     case TARGET_NR_getitimer:
9891         {
9892             struct itimerval value;
9893 
9894             ret = get_errno(getitimer(arg1, &value));
9895             if (!is_error(ret) && arg2) {
9896                 if (copy_to_user_timeval(arg2,
9897                                          &value.it_interval)
9898                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
9899                                             &value.it_value))
9900                     return -TARGET_EFAULT;
9901             }
9902         }
9903         return ret;
9904 #ifdef TARGET_NR_stat
9905     case TARGET_NR_stat:
9906         if (!(p = lock_user_string(arg1))) {
9907             return -TARGET_EFAULT;
9908         }
9909         ret = get_errno(stat(path(p), &st));
9910         unlock_user(p, arg1, 0);
9911         goto do_stat;
9912 #endif
9913 #ifdef TARGET_NR_lstat
9914     case TARGET_NR_lstat:
9915         if (!(p = lock_user_string(arg1))) {
9916             return -TARGET_EFAULT;
9917         }
9918         ret = get_errno(lstat(path(p), &st));
9919         unlock_user(p, arg1, 0);
9920         goto do_stat;
9921 #endif
9922 #ifdef TARGET_NR_fstat
9923     case TARGET_NR_fstat:
9924         {
9925             ret = get_errno(fstat(arg1, &st));
9926 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
9927         do_stat:
9928 #endif
9929             if (!is_error(ret)) {
9930                 struct target_stat *target_st;
9931 
9932                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
9933                     return -TARGET_EFAULT;
9934                 memset(target_st, 0, sizeof(*target_st));
9935                 __put_user(st.st_dev, &target_st->st_dev);
9936                 __put_user(st.st_ino, &target_st->st_ino);
9937                 __put_user(st.st_mode, &target_st->st_mode);
9938                 __put_user(st.st_uid, &target_st->st_uid);
9939                 __put_user(st.st_gid, &target_st->st_gid);
9940                 __put_user(st.st_nlink, &target_st->st_nlink);
9941                 __put_user(st.st_rdev, &target_st->st_rdev);
9942                 __put_user(st.st_size, &target_st->st_size);
9943                 __put_user(st.st_blksize, &target_st->st_blksize);
9944                 __put_user(st.st_blocks, &target_st->st_blocks);
9945                 __put_user(st.st_atime, &target_st->target_st_atime);
9946                 __put_user(st.st_mtime, &target_st->target_st_mtime);
9947                 __put_user(st.st_ctime, &target_st->target_st_ctime);
9948 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
9949                 __put_user(st.st_atim.tv_nsec,
9950                            &target_st->target_st_atime_nsec);
9951                 __put_user(st.st_mtim.tv_nsec,
9952                            &target_st->target_st_mtime_nsec);
9953                 __put_user(st.st_ctim.tv_nsec,
9954                            &target_st->target_st_ctime_nsec);
9955 #endif
9956                 unlock_user_struct(target_st, arg2, 1);
9957             }
9958         }
9959         return ret;
9960 #endif
9961     case TARGET_NR_vhangup:
9962         return get_errno(vhangup());
9963 #ifdef TARGET_NR_syscall
9964     case TARGET_NR_syscall:
9965         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
9966                           arg6, arg7, arg8, 0);
9967 #endif
9968 #if defined(TARGET_NR_wait4)
9969     case TARGET_NR_wait4:
9970         {
9971             int status;
9972             abi_long status_ptr = arg2;
9973             struct rusage rusage, *rusage_ptr;
9974             abi_ulong target_rusage = arg4;
9975             abi_long rusage_err;
9976             if (target_rusage)
9977                 rusage_ptr = &rusage;
9978             else
9979                 rusage_ptr = NULL;
9980             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
9981             if (!is_error(ret)) {
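                      /* Only write the status back if the caller supplied a
                       * buffer and a child actually changed state (ret != 0). */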
9982                 if (status_ptr && ret) {
9983                     status = host_to_target_waitstatus(status);
9984                     if (put_user_s32(status, status_ptr))
9985                         return -TARGET_EFAULT;
9986                 }
9987                 if (target_rusage) {
9988                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
9989                     if (rusage_err) {
9990                         ret = rusage_err;
9991                     }
9992                 }
9993             }
9994         }
9995         return ret;
9996 #endif
9997 #ifdef TARGET_NR_swapoff
9998     case TARGET_NR_swapoff:
9999         if (!(p = lock_user_string(arg1)))
10000             return -TARGET_EFAULT;
10001         ret = get_errno(swapoff(p));
10002         unlock_user(p, arg1, 0);
10003         return ret;
10004 #endif
10005     case TARGET_NR_sysinfo:
10006         {
10007             struct target_sysinfo *target_value;
10008             struct sysinfo value;
10009             ret = get_errno(sysinfo(&value));
10010             if (!is_error(ret) && arg1)
10011             {
10012                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10013                     return -TARGET_EFAULT;
10014                 __put_user(value.uptime, &target_value->uptime);
10015                 __put_user(value.loads[0], &target_value->loads[0]);
10016                 __put_user(value.loads[1], &target_value->loads[1]);
10017                 __put_user(value.loads[2], &target_value->loads[2]);
10018                 __put_user(value.totalram, &target_value->totalram);
10019                 __put_user(value.freeram, &target_value->freeram);
10020                 __put_user(value.sharedram, &target_value->sharedram);
10021                 __put_user(value.bufferram, &target_value->bufferram);
10022                 __put_user(value.totalswap, &target_value->totalswap);
10023                 __put_user(value.freeswap, &target_value->freeswap);
10024                 __put_user(value.procs, &target_value->procs);
10025                 __put_user(value.totalhigh, &target_value->totalhigh);
10026                 __put_user(value.freehigh, &target_value->freehigh);
10027                 __put_user(value.mem_unit, &target_value->mem_unit);
10028                 unlock_user_struct(target_value, arg1, 1);
10029             }
10030         }
10031         return ret;
10032 #ifdef TARGET_NR_ipc
10033     case TARGET_NR_ipc:
10034         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10035 #endif
10036 #ifdef TARGET_NR_semget
10037     case TARGET_NR_semget:
10038         return get_errno(semget(arg1, arg2, arg3));
10039 #endif
10040 #ifdef TARGET_NR_semop
10041     case TARGET_NR_semop:
10042         return do_semtimedop(arg1, arg2, arg3, 0, false);
10043 #endif
10044 #ifdef TARGET_NR_semtimedop
10045     case TARGET_NR_semtimedop:
10046         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10047 #endif
10048 #ifdef TARGET_NR_semtimedop_time64
10049     case TARGET_NR_semtimedop_time64:
10050         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10051 #endif
10052 #ifdef TARGET_NR_semctl
10053     case TARGET_NR_semctl:
10054         return do_semctl(arg1, arg2, arg3, arg4);
10055 #endif
10056 #ifdef TARGET_NR_msgctl
10057     case TARGET_NR_msgctl:
10058         return do_msgctl(arg1, arg2, arg3);
10059 #endif
10060 #ifdef TARGET_NR_msgget
10061     case TARGET_NR_msgget:
10062         return get_errno(msgget(arg1, arg2));
10063 #endif
10064 #ifdef TARGET_NR_msgrcv
10065     case TARGET_NR_msgrcv:
10066         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10067 #endif
10068 #ifdef TARGET_NR_msgsnd
10069     case TARGET_NR_msgsnd:
10070         return do_msgsnd(arg1, arg2, arg3, arg4);
10071 #endif
10072 #ifdef TARGET_NR_shmget
10073     case TARGET_NR_shmget:
10074         return get_errno(shmget(arg1, arg2, arg3));
10075 #endif
10076 #ifdef TARGET_NR_shmctl
10077     case TARGET_NR_shmctl:
10078         return do_shmctl(arg1, arg2, arg3);
10079 #endif
10080 #ifdef TARGET_NR_shmat
10081     case TARGET_NR_shmat:
10082         return do_shmat(cpu_env, arg1, arg2, arg3);
10083 #endif
10084 #ifdef TARGET_NR_shmdt
10085     case TARGET_NR_shmdt:
10086         return do_shmdt(arg1);
10087 #endif
10088     case TARGET_NR_fsync:
10089         return get_errno(fsync(arg1));
10090     case TARGET_NR_clone:
10091         /* Linux manages to have three different orderings for its
10092          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10093          * match the kernel's CONFIG_CLONE_* settings.
10094          * Microblaze is further special in that it uses a sixth
10095          * implicit argument to clone for the TLS pointer.
10096          */
10097 #if defined(TARGET_MICROBLAZE)
10098         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10099 #elif defined(TARGET_CLONE_BACKWARDS)
10100         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10101 #elif defined(TARGET_CLONE_BACKWARDS2)
10102         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10103 #else
10104         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10105 #endif
10106         return ret;
10107 #ifdef __NR_exit_group
10108         /* new thread calls */
10109     case TARGET_NR_exit_group:
10110         preexit_cleanup(cpu_env, arg1);
10111         return get_errno(exit_group(arg1));
10112 #endif
10113     case TARGET_NR_setdomainname:
10114         if (!(p = lock_user_string(arg1)))
10115             return -TARGET_EFAULT;
10116         ret = get_errno(setdomainname(p, arg2));
10117         unlock_user(p, arg1, 0);
10118         return ret;
10119     case TARGET_NR_uname:
10120         /* no need to transcode because we use the linux syscall */
10121         {
10122             struct new_utsname * buf;
10123 
10124             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10125                 return -TARGET_EFAULT;
10126             ret = get_errno(sys_uname(buf));
10127             if (!is_error(ret)) {
10128                 /* Overwrite the native machine name with whatever is being
10129                    emulated. */
10130                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10131                           sizeof(buf->machine));
10132                 /* Allow the user to override the reported release.  */
10133                 if (qemu_uname_release && *qemu_uname_release) {
10134                     g_strlcpy(buf->release, qemu_uname_release,
10135                               sizeof(buf->release));
10136                 }
10137             }
10138             unlock_user_struct(buf, arg1, 1);
10139         }
10140         return ret;
10141 #ifdef TARGET_I386
10142     case TARGET_NR_modify_ldt:
10143         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10144 #if !defined(TARGET_X86_64)
10145     case TARGET_NR_vm86:
10146         return do_vm86(cpu_env, arg1, arg2);
10147 #endif
10148 #endif
10149 #if defined(TARGET_NR_adjtimex)
10150     case TARGET_NR_adjtimex:
10151         {
10152             struct timex host_buf;
10153 
10154             if (target_to_host_timex(&host_buf, arg1) != 0) {
10155                 return -TARGET_EFAULT;
10156             }
10157             ret = get_errno(adjtimex(&host_buf));
10158             if (!is_error(ret)) {
10159                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10160                     return -TARGET_EFAULT;
10161                 }
10162             }
10163         }
10164         return ret;
10165 #endif
10166 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10167     case TARGET_NR_clock_adjtime:
10168         {
10169             struct timex htx, *phtx = &htx;
10170 
10171             if (target_to_host_timex(phtx, arg2) != 0) {
10172                 return -TARGET_EFAULT;
10173             }
10174             ret = get_errno(clock_adjtime(arg1, phtx));
10175             if (!is_error(ret) && phtx) {
10176                 if (host_to_target_timex(arg2, phtx) != 0) {
10177                     return -TARGET_EFAULT;
10178                 }
10179             }
10180         }
10181         return ret;
10182 #endif
10183 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10184     case TARGET_NR_clock_adjtime64:
10185         {
10186             struct timex htx;
10187 
10188             if (target_to_host_timex64(&htx, arg2) != 0) {
10189                 return -TARGET_EFAULT;
10190             }
10191             ret = get_errno(clock_adjtime(arg1, &htx));
10192             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10193                     return -TARGET_EFAULT;
10194             }
10195         }
10196         return ret;
10197 #endif
10198     case TARGET_NR_getpgid:
10199         return get_errno(getpgid(arg1));
10200     case TARGET_NR_fchdir:
10201         return get_errno(fchdir(arg1));
10202     case TARGET_NR_personality:
10203         return get_errno(personality(arg1));
10204 #ifdef TARGET_NR__llseek /* Not on alpha */
10205     case TARGET_NR__llseek:
10206         {
10207             int64_t res;
10208 #if !defined(__NR_llseek)
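                  /*
                   * No llseek syscall on the host: combine the high (arg2) and
                   * low (arg3) 32-bit halves into a 64-bit offset and use
                   * lseek() directly.
                   */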
10209             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10210             if (res == -1) {
10211                 ret = get_errno(res);
10212             } else {
10213                 ret = 0;
10214             }
10215 #else
10216             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10217 #endif
10218             if ((ret == 0) && put_user_s64(res, arg4)) {
10219                 return -TARGET_EFAULT;
10220             }
10221         }
10222         return ret;
10223 #endif
10224 #ifdef TARGET_NR_getdents
10225     case TARGET_NR_getdents:
10226 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
10227 #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
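              /*
               * The host struct linux_dirent uses 64-bit d_ino/d_off fields
               * here while the 32-bit target expects 32-bit ones, so read the
               * records into a scratch buffer and rewrite them in the target
               * layout.
               */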
10228         {
10229             struct target_dirent *target_dirp;
10230             struct linux_dirent *dirp;
10231             abi_long count = arg3;
10232 
10233             dirp = g_try_malloc(count);
10234             if (!dirp) {
10235                 return -TARGET_ENOMEM;
10236             }
10237 
10238             ret = get_errno(sys_getdents(arg1, dirp, count));
10239             if (!is_error(ret)) {
10240                 struct linux_dirent *de;
10241                 struct target_dirent *tde;
10242                 int len = ret;
10243                 int reclen, treclen;
10244                 int count1, tnamelen;
10245
10246                 count1 = 0;
10247                 de = dirp;
10248                 if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0))) {
                          /* Free the scratch buffer before bailing out. */
                          g_free(dirp);
10249                     return -TARGET_EFAULT;
                      }
10250                 tde = target_dirp;
10251                 while (len > 0) {
10252                     reclen = de->d_reclen;
10253                     tnamelen = reclen - offsetof(struct linux_dirent, d_name);
10254                     assert(tnamelen >= 0);
10255                     treclen = tnamelen + offsetof(struct target_dirent, d_name);
10256                     assert(count1 + treclen <= count);
10257                     tde->d_reclen = tswap16(treclen);
10258                     tde->d_ino = tswapal(de->d_ino);
10259                     tde->d_off = tswapal(de->d_off);
10260                     memcpy(tde->d_name, de->d_name, tnamelen);
10261                     de = (struct linux_dirent *)((char *)de + reclen);
10262                     len -= reclen;
10263                     tde = (struct target_dirent *)((char *)tde + treclen);
10264                     count1 += treclen;
10265                 }
10266                 ret = count1;
10267                 unlock_user(target_dirp, arg2, ret);
10268             }
10269             g_free(dirp);
10270         }
10271 #else
10272         {
10273             struct linux_dirent *dirp;
10274             abi_long count = arg3;
10275 
10276             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10277                 return -TARGET_EFAULT;
10278             ret = get_errno(sys_getdents(arg1, dirp, count));
10279             if (!is_error(ret)) {
10280                 struct linux_dirent *de;
10281                 int len = ret;
10282                 int reclen;
10283                 de = dirp;
10284                 while (len > 0) {
10285                     reclen = de->d_reclen;
10286                     if (reclen > len)
10287                         break;
10288                     de->d_reclen = tswap16(reclen);
10289                     tswapls(&de->d_ino);
10290                     tswapls(&de->d_off);
10291                     de = (struct linux_dirent *)((char *)de + reclen);
10292                     len -= reclen;
10293                 }
10294             }
10295             unlock_user(dirp, arg2, ret);
10296         }
10297 #endif
10298 #else
10299         /* Implement getdents in terms of getdents64 */
10300         {
10301             struct linux_dirent64 *dirp;
10302             abi_long count = arg3;
10303 
10304             dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
10305             if (!dirp) {
10306                 return -TARGET_EFAULT;
10307             }
10308             ret = get_errno(sys_getdents64(arg1, dirp, count));
10309             if (!is_error(ret)) {
10310                 /* Convert the dirent64 structs to target dirent.  We do this
10311                  * in-place, since we can guarantee that a target_dirent is no
10312                  * larger than a dirent64; however this means we have to be
10313                  * careful to read everything before writing in the new format.
10314                  */
10315                 struct linux_dirent64 *de;
10316                 struct target_dirent *tde;
10317                 int len = ret;
10318                 int tlen = 0;
10319 
10320                 de = dirp;
10321                 tde = (struct target_dirent *)dirp;
10322                 while (len > 0) {
10323                     int namelen, treclen;
10324                     int reclen = de->d_reclen;
10325                     uint64_t ino = de->d_ino;
10326                     int64_t off = de->d_off;
10327                     uint8_t type = de->d_type;
10328 
10329                     namelen = strlen(de->d_name);
10330                     treclen = offsetof(struct target_dirent, d_name)
10331                         + namelen + 2;
10332                     treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
10333 
10334                     memmove(tde->d_name, de->d_name, namelen + 1);
10335                     tde->d_ino = tswapal(ino);
10336                     tde->d_off = tswapal(off);
10337                     tde->d_reclen = tswap16(treclen);
10338                     /* The target_dirent type is in what was formerly a padding
10339                      * byte at the end of the structure:
10340                      */
10341                     *(((char *)tde) + treclen - 1) = type;
10342 
10343                     de = (struct linux_dirent64 *)((char *)de + reclen);
10344                     tde = (struct target_dirent *)((char *)tde + treclen);
10345                     len -= reclen;
10346                     tlen += treclen;
10347                 }
10348                 ret = tlen;
10349             }
10350             unlock_user(dirp, arg2, ret);
10351         }
10352 #endif
10353         return ret;
10354 #endif /* TARGET_NR_getdents */
10355 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10356     case TARGET_NR_getdents64:
10357         {
10358             struct linux_dirent64 *dirp;
10359             abi_long count = arg3;
10360             if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
10361                 return -TARGET_EFAULT;
10362             ret = get_errno(sys_getdents64(arg1, dirp, count));
10363             if (!is_error(ret)) {
10364                 struct linux_dirent64 *de;
10365                 int len = ret;
10366                 int reclen;
10367                 de = dirp;
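                      /* The record layout already matches the target's, so just
                       * byte-swap d_ino, d_off and d_reclen in place. */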
10368                 while (len > 0) {
10369                     reclen = de->d_reclen;
10370                     if (reclen > len)
10371                         break;
10372                     de->d_reclen = tswap16(reclen);
10373                     tswap64s((uint64_t *)&de->d_ino);
10374                     tswap64s((uint64_t *)&de->d_off);
10375                     de = (struct linux_dirent64 *)((char *)de + reclen);
10376                     len -= reclen;
10377                 }
10378             }
10379             unlock_user(dirp, arg2, ret);
10380         }
10381         return ret;
10382 #endif /* TARGET_NR_getdents64 */
10383 #if defined(TARGET_NR__newselect)
10384     case TARGET_NR__newselect:
10385         return do_select(arg1, arg2, arg3, arg4, arg5);
10386 #endif
10387 #ifdef TARGET_NR_poll
10388     case TARGET_NR_poll:
10389         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10390 #endif
10391 #ifdef TARGET_NR_ppoll
10392     case TARGET_NR_ppoll:
10393         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10394 #endif
10395 #ifdef TARGET_NR_ppoll_time64
10396     case TARGET_NR_ppoll_time64:
10397         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10398 #endif
10399     case TARGET_NR_flock:
10400         /* NOTE: the flock constant seems to be the same for every
10401            Linux platform */
10402         return get_errno(safe_flock(arg1, arg2));
10403     case TARGET_NR_readv:
10404         {
10405             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10406             if (vec != NULL) {
10407                 ret = get_errno(safe_readv(arg1, vec, arg3));
10408                 unlock_iovec(vec, arg2, arg3, 1);
10409             } else {
10410                 ret = -host_to_target_errno(errno);
10411             }
10412         }
10413         return ret;
10414     case TARGET_NR_writev:
10415         {
10416             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10417             if (vec != NULL) {
10418                 ret = get_errno(safe_writev(arg1, vec, arg3));
10419                 unlock_iovec(vec, arg2, arg3, 0);
10420             } else {
10421                 ret = -host_to_target_errno(errno);
10422             }
10423         }
10424         return ret;
10425 #if defined(TARGET_NR_preadv)
10426     case TARGET_NR_preadv:
10427         {
10428             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10429             if (vec != NULL) {
10430                 unsigned long low, high;
10431 
10432                 target_to_host_low_high(arg4, arg5, &low, &high);
10433                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10434                 unlock_iovec(vec, arg2, arg3, 1);
10435             } else {
10436                 ret = -host_to_target_errno(errno);
10437            }
10438         }
10439         return ret;
10440 #endif
10441 #if defined(TARGET_NR_pwritev)
10442     case TARGET_NR_pwritev:
10443         {
10444             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10445             if (vec != NULL) {
10446                 unsigned long low, high;
10447 
10448                 target_to_host_low_high(arg4, arg5, &low, &high);
10449                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10450                 unlock_iovec(vec, arg2, arg3, 0);
10451             } else {
10452                 ret = -host_to_target_errno(errno);
10453            }
10454         }
10455         return ret;
10456 #endif
10457     case TARGET_NR_getsid:
10458         return get_errno(getsid(arg1));
10459 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10460     case TARGET_NR_fdatasync:
10461         return get_errno(fdatasync(arg1));
10462 #endif
10463     case TARGET_NR_sched_getaffinity:
10464         {
10465             unsigned int mask_size;
10466             unsigned long *mask;
10467 
10468             /*
10469              * sched_getaffinity needs multiples of ulong, so we need to take
10470              * care of mismatches between target ulong and host ulong sizes.
10471              */
10472             if (arg2 & (sizeof(abi_ulong) - 1)) {
10473                 return -TARGET_EINVAL;
10474             }
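                  /* Round the requested size up to a whole number of host longs. */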
10475             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10476 
10477             mask = alloca(mask_size);
10478             memset(mask, 0, mask_size);
10479             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10480 
10481             if (!is_error(ret)) {
10482                 if (ret > arg2) {
10483                     /* More data returned than the caller's buffer will fit.
10484                      * This only happens if sizeof(abi_long) < sizeof(long)
10485                      * and the caller passed us a buffer holding an odd number
10486                      * of abi_longs. If the host kernel is actually using the
10487                      * extra 4 bytes then fail EINVAL; otherwise we can just
10488                      * ignore them and only copy the interesting part.
10489                      */
10490                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10491                     if (numcpus > arg2 * 8) {
10492                         return -TARGET_EINVAL;
10493                     }
10494                     ret = arg2;
10495                 }
10496 
10497                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10498                     return -TARGET_EFAULT;
10499                 }
10500             }
10501         }
10502         return ret;
10503     case TARGET_NR_sched_setaffinity:
10504         {
10505             unsigned int mask_size;
10506             unsigned long *mask;
10507 
10508             /*
10509              * sched_setaffinity needs multiples of ulong, so we need to take
10510              * care of mismatches between target ulong and host ulong sizes.
10511              */
10512             if (arg2 & (sizeof(abi_ulong) - 1)) {
10513                 return -TARGET_EINVAL;
10514             }
10515             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10516             mask = alloca(mask_size);
10517 
10518             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10519             if (ret) {
10520                 return ret;
10521             }
10522 
10523             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10524         }
10525     case TARGET_NR_getcpu:
10526         {
10527             unsigned cpu, node;
10528             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10529                                        arg2 ? &node : NULL,
10530                                        NULL));
10531             if (is_error(ret)) {
10532                 return ret;
10533             }
10534             if (arg1 && put_user_u32(cpu, arg1)) {
10535                 return -TARGET_EFAULT;
10536             }
10537             if (arg2 && put_user_u32(node, arg2)) {
10538                 return -TARGET_EFAULT;
10539             }
10540         }
10541         return ret;
10542     case TARGET_NR_sched_setparam:
10543         {
10544             struct sched_param *target_schp;
10545             struct sched_param schp;
10546 
10547             if (arg2 == 0) {
10548                 return -TARGET_EINVAL;
10549             }
10550             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
10551                 return -TARGET_EFAULT;
10552             schp.sched_priority = tswap32(target_schp->sched_priority);
10553             unlock_user_struct(target_schp, arg2, 0);
10554             return get_errno(sched_setparam(arg1, &schp));
10555         }
10556     case TARGET_NR_sched_getparam:
10557         {
10558             struct sched_param *target_schp;
10559             struct sched_param schp;
10560 
10561             if (arg2 == 0) {
10562                 return -TARGET_EINVAL;
10563             }
10564             ret = get_errno(sched_getparam(arg1, &schp));
10565             if (!is_error(ret)) {
10566                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
10567                     return -TARGET_EFAULT;
10568                 target_schp->sched_priority = tswap32(schp.sched_priority);
10569                 unlock_user_struct(target_schp, arg2, 1);
10570             }
10571         }
10572         return ret;
10573     case TARGET_NR_sched_setscheduler:
10574         {
10575             struct sched_param *target_schp;
10576             struct sched_param schp;
10577             if (arg3 == 0) {
10578                 return -TARGET_EINVAL;
10579             }
10580             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
10581                 return -TARGET_EFAULT;
10582             schp.sched_priority = tswap32(target_schp->sched_priority);
10583             unlock_user_struct(target_schp, arg3, 0);
10584             return get_errno(sched_setscheduler(arg1, arg2, &schp));
10585         }
10586     case TARGET_NR_sched_getscheduler:
10587         return get_errno(sched_getscheduler(arg1));
10588     case TARGET_NR_sched_yield:
10589         return get_errno(sched_yield());
10590     case TARGET_NR_sched_get_priority_max:
10591         return get_errno(sched_get_priority_max(arg1));
10592     case TARGET_NR_sched_get_priority_min:
10593         return get_errno(sched_get_priority_min(arg1));
10594 #ifdef TARGET_NR_sched_rr_get_interval
10595     case TARGET_NR_sched_rr_get_interval:
10596         {
10597             struct timespec ts;
10598             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10599             if (!is_error(ret)) {
10600                 ret = host_to_target_timespec(arg2, &ts);
10601             }
10602         }
10603         return ret;
10604 #endif
10605 #ifdef TARGET_NR_sched_rr_get_interval_time64
10606     case TARGET_NR_sched_rr_get_interval_time64:
10607         {
10608             struct timespec ts;
10609             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10610             if (!is_error(ret)) {
10611                 ret = host_to_target_timespec64(arg2, &ts);
10612             }
10613         }
10614         return ret;
10615 #endif
10616 #if defined(TARGET_NR_nanosleep)
10617     case TARGET_NR_nanosleep:
10618         {
10619             struct timespec req, rem;
10620             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10621             ret = get_errno(safe_nanosleep(&req, &rem));
10622             if (is_error(ret) && arg2 &&
10623                 host_to_target_timespec(arg2, &rem)) {
                      return -TARGET_EFAULT;
10624             }
10625         }
10626         return ret;
10627 #endif
10628     case TARGET_NR_prctl:
10629         switch (arg1) {
10630         case PR_GET_PDEATHSIG:
10631         {
10632             int deathsig;
10633             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10634             if (!is_error(ret) && arg2
10635                 && put_user_s32(deathsig, arg2)) {
10636                 return -TARGET_EFAULT;
10637             }
10638             return ret;
10639         }
10640 #ifdef PR_GET_NAME
10641         case PR_GET_NAME:
10642         {
10643             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10644             if (!name) {
10645                 return -TARGET_EFAULT;
10646             }
10647             ret = get_errno(prctl(arg1, (unsigned long)name,
10648                                   arg3, arg4, arg5));
10649             unlock_user(name, arg2, 16);
10650             return ret;
10651         }
10652         case PR_SET_NAME:
10653         {
10654             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10655             if (!name) {
10656                 return -TARGET_EFAULT;
10657             }
10658             ret = get_errno(prctl(arg1, (unsigned long)name,
10659                                   arg3, arg4, arg5));
10660             unlock_user(name, arg2, 0);
10661             return ret;
10662         }
10663 #endif
10664 #ifdef TARGET_MIPS
10665         case TARGET_PR_GET_FP_MODE:
10666         {
10667             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10668             ret = 0;
10669             if (env->CP0_Status & (1 << CP0St_FR)) {
10670                 ret |= TARGET_PR_FP_MODE_FR;
10671             }
10672             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10673                 ret |= TARGET_PR_FP_MODE_FRE;
10674             }
10675             return ret;
10676         }
10677         case TARGET_PR_SET_FP_MODE:
10678         {
10679             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10680             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10681             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10682             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10683             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10684 
10685             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10686                                             TARGET_PR_FP_MODE_FRE;
10687 
10688             /* If nothing to change, return right away, successfully.  */
10689             if (old_fr == new_fr && old_fre == new_fre) {
10690                 return 0;
10691             }
10692             /* Check the value is valid */
10693             if (arg2 & ~known_bits) {
10694                 return -TARGET_EOPNOTSUPP;
10695             }
10696             /* Setting FRE without FR is not supported.  */
10697             if (new_fre && !new_fr) {
10698                 return -TARGET_EOPNOTSUPP;
10699             }
10700             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10701                 /* FR1 is not supported */
10702                 return -TARGET_EOPNOTSUPP;
10703             }
10704             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10705                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10706                 /* cannot set FR=0 */
10707                 return -TARGET_EOPNOTSUPP;
10708             }
10709             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10710                 /* Cannot set FRE=1 */
10711                 return -TARGET_EOPNOTSUPP;
10712             }
10713 
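                  /*
                   * Editor's note (descriptive comment, added for clarity): in
                   * FR=0 mode a 64-bit FP value is split across an even/odd pair
                   * of 32-bit registers, while in FR=1 mode it lives entirely in
                   * the even register.  The loop below moves the upper 32 bits
                   * between fpr[i].w[!FP_ENDIAN_IDX] and fpr[i + 1].w[FP_ENDIAN_IDX]
                   * so existing register contents survive the mode switch.
                   */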
10714             int i;
10715             fpr_t *fpr = env->active_fpu.fpr;
10716             for (i = 0; i < 32 ; i += 2) {
10717                 if (!old_fr && new_fr) {
10718                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10719                 } else if (old_fr && !new_fr) {
10720                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10721                 }
10722             }
10723 
10724             if (new_fr) {
10725                 env->CP0_Status |= (1 << CP0St_FR);
10726                 env->hflags |= MIPS_HFLAG_F64;
10727             } else {
10728                 env->CP0_Status &= ~(1 << CP0St_FR);
10729                 env->hflags &= ~MIPS_HFLAG_F64;
10730             }
10731             if (new_fre) {
10732                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10733                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10734                     env->hflags |= MIPS_HFLAG_FRE;
10735                 }
10736             } else {
10737                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10738                 env->hflags &= ~MIPS_HFLAG_FRE;
10739             }
10740 
10741             return 0;
10742         }
10743 #endif /* MIPS */
10744 #ifdef TARGET_AARCH64
10745         case TARGET_PR_SVE_SET_VL:
10746             /*
10747              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10748              * PR_SVE_VL_INHERIT.  Note the kernel definition
10749              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10750              * even though the current architectural maximum is VQ=16.
10751              */
10752             ret = -TARGET_EINVAL;
10753             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10754                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10755                 CPUARMState *env = cpu_env;
10756                 ARMCPU *cpu = env_archcpu(env);
10757                 uint32_t vq, old_vq;
10758 
10759                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10760                 vq = MAX(arg2 / 16, 1);
10761                 vq = MIN(vq, cpu->sve_max_vq);
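                      /*
                       * Editor's note (illustrative example, not part of the
                       * original source): arg2 is the requested vector length in
                       * bytes, so e.g. arg2 == 64 maps to vq == 4 (a 512-bit
                       * vector), which is then clamped to the CPU's sve_max_vq.
                       * ZCR_EL1 below is written as vq - 1, and the syscall
                       * returns the granted length in bytes, vq * 16.
                       */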
10762 
10763                 if (vq < old_vq) {
10764                     aarch64_sve_narrow_vq(env, vq);
10765                 }
10766                 env->vfp.zcr_el[1] = vq - 1;
10767                 arm_rebuild_hflags(env);
10768                 ret = vq * 16;
10769             }
10770             return ret;
10771         case TARGET_PR_SVE_GET_VL:
10772             ret = -TARGET_EINVAL;
10773             {
10774                 ARMCPU *cpu = env_archcpu(cpu_env);
10775                 if (cpu_isar_feature(aa64_sve, cpu)) {
10776                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10777                 }
10778             }
10779             return ret;
10780         case TARGET_PR_PAC_RESET_KEYS:
10781             {
10782                 CPUARMState *env = cpu_env;
10783                 ARMCPU *cpu = env_archcpu(env);
10784 
10785                 if (arg3 || arg4 || arg5) {
10786                     return -TARGET_EINVAL;
10787                 }
10788                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10789                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10790                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10791                                TARGET_PR_PAC_APGAKEY);
10792                     int ret = 0;
10793                     Error *err = NULL;
10794 
10795                     if (arg2 == 0) {
10796                         arg2 = all;
10797                     } else if (arg2 & ~all) {
10798                         return -TARGET_EINVAL;
10799                     }
10800                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10801                         ret |= qemu_guest_getrandom(&env->keys.apia,
10802                                                     sizeof(ARMPACKey), &err);
10803                     }
10804                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10805                         ret |= qemu_guest_getrandom(&env->keys.apib,
10806                                                     sizeof(ARMPACKey), &err);
10807                     }
10808                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10809                         ret |= qemu_guest_getrandom(&env->keys.apda,
10810                                                     sizeof(ARMPACKey), &err);
10811                     }
10812                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10813                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10814                                                     sizeof(ARMPACKey), &err);
10815                     }
10816                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10817                         ret |= qemu_guest_getrandom(&env->keys.apga,
10818                                                     sizeof(ARMPACKey), &err);
10819                     }
10820                     if (ret != 0) {
10821                         /*
10822                          * Some unknown failure in the crypto.  The best
10823                          * we can do is log it and fail the syscall.
10824                          * The real syscall cannot fail this way.
10825                          */
10826                         qemu_log_mask(LOG_UNIMP,
10827                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10828                                       error_get_pretty(err));
10829                         error_free(err);
10830                         return -TARGET_EIO;
10831                     }
10832                     return 0;
10833                 }
10834             }
10835             return -TARGET_EINVAL;
10836         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10837             {
10838                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10839                 CPUARMState *env = cpu_env;
10840                 ARMCPU *cpu = env_archcpu(env);
10841 
10842                 if (cpu_isar_feature(aa64_mte, cpu)) {
10843                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10844                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10845                 }
10846 
10847                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10848                     return -TARGET_EINVAL;
10849                 }
10850                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10851 
10852                 if (cpu_isar_feature(aa64_mte, cpu)) {
10853                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10854                     case TARGET_PR_MTE_TCF_NONE:
10855                     case TARGET_PR_MTE_TCF_SYNC:
10856                     case TARGET_PR_MTE_TCF_ASYNC:
10857                         break;
10858                     default:
10859                         return -TARGET_EINVAL;
10860                     }
10861 
10862                     /*
10863                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10864                      * Note that the syscall values are consistent with hw.
10865                      */
10866                     env->cp15.sctlr_el[1] =
10867                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10868                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10869 
10870                     /*
10871                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10872                      * Note that the syscall uses an include mask,
10873                      * and hardware uses an exclude mask -- invert.
10874                      */
10875                     env->cp15.gcr_el1 =
10876                         deposit64(env->cp15.gcr_el1, 0, 16,
10877                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
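                          /*
                           * Editor's note (worked example, added for clarity):
                           * the guest supplies an *include* mask.  If only tags
                           * 0 and 1 are to be generated, bits 0-1 of that field
                           * are set, so the inverted value deposited here makes
                           * GCR_EL1.Exclude == 0xfffc, excluding every other tag.
                           */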
10878                     arm_rebuild_hflags(env);
10879                 }
10880                 return 0;
10881             }
10882         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10883             {
10884                 abi_long ret = 0;
10885                 CPUARMState *env = cpu_env;
10886                 ARMCPU *cpu = env_archcpu(env);
10887 
10888                 if (arg2 || arg3 || arg4 || arg5) {
10889                     return -TARGET_EINVAL;
10890                 }
10891                 if (env->tagged_addr_enable) {
10892                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10893                 }
10894                 if (cpu_isar_feature(aa64_mte, cpu)) {
10895                     /* See above. */
10896                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10897                             << TARGET_PR_MTE_TCF_SHIFT);
10898                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10899                                     ~env->cp15.gcr_el1);
10900                 }
10901                 return ret;
10902             }
10903 #endif /* AARCH64 */
10904         case PR_GET_SECCOMP:
10905         case PR_SET_SECCOMP:
10906             /* Refuse these so the guest cannot use seccomp to block
10907              * syscalls that QEMU itself needs. */
10908             return -TARGET_EINVAL;
10909         default:
10910             /* Most prctl options have no pointer arguments */
10911             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10912         }
10913         break;
10914 #ifdef TARGET_NR_arch_prctl
10915     case TARGET_NR_arch_prctl:
10916         return do_arch_prctl(cpu_env, arg1, arg2);
10917 #endif
10918 #ifdef TARGET_NR_pread64
10919     case TARGET_NR_pread64:
10920         if (regpairs_aligned(cpu_env, num)) {
10921             arg4 = arg5;
10922             arg5 = arg6;
10923         }
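              /*
               * Editor's note (added for clarity): on ABIs that pass 64-bit
               * syscall arguments in aligned register pairs (e.g. 32-bit ARM
               * EABI), an unused register is inserted before the offset, so
               * its low/high words arrive one slot later; the shift above
               * realigns them before target_offset64() is applied below.
               */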
10924         if (arg2 == 0 && arg3 == 0) {
10925             /* Special-case NULL buffer and zero length, which should succeed */
10926             p = 0;
10927         } else {
10928             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10929             if (!p) {
10930                 return -TARGET_EFAULT;
10931             }
10932         }
10933         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10934         unlock_user(p, arg2, ret);
10935         return ret;
10936     case TARGET_NR_pwrite64:
10937         if (regpairs_aligned(cpu_env, num)) {
10938             arg4 = arg5;
10939             arg5 = arg6;
10940         }
10941         if (arg2 == 0 && arg3 == 0) {
10942             /* Special-case NULL buffer and zero length, which should succeed */
10943             p = 0;
10944         } else {
10945             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10946             if (!p) {
10947                 return -TARGET_EFAULT;
10948             }
10949         }
10950         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
10951         unlock_user(p, arg2, 0);
10952         return ret;
10953 #endif
10954     case TARGET_NR_getcwd:
10955         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
10956             return -TARGET_EFAULT;
10957         ret = get_errno(sys_getcwd1(p, arg2));
10958         unlock_user(p, arg1, ret);
10959         return ret;
10960     case TARGET_NR_capget:
10961     case TARGET_NR_capset:
10962     {
10963         struct target_user_cap_header *target_header;
10964         struct target_user_cap_data *target_data = NULL;
10965         struct __user_cap_header_struct header;
10966         struct __user_cap_data_struct data[2];
10967         struct __user_cap_data_struct *dataptr = NULL;
10968         int i, target_datalen;
10969         int data_items = 1;
10970 
10971         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
10972             return -TARGET_EFAULT;
10973         }
10974         header.version = tswap32(target_header->version);
10975         header.pid = tswap32(target_header->pid);
10976 
10977         if (header.version != _LINUX_CAPABILITY_VERSION) {
10978             /* Version 2 and up takes pointer to two user_data structs */
10979             data_items = 2;
10980         }
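              /*
               * Editor's note (background, added for clarity): since
               * _LINUX_CAPABILITY_VERSION_2 the kernel works with 64-bit
               * capability sets passed as an array of two 32-bit
               * __user_cap_data_struct entries, which is why data_items
               * becomes 2 above.
               */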
10981 
10982         target_datalen = sizeof(*target_data) * data_items;
10983 
10984         if (arg2) {
10985             if (num == TARGET_NR_capget) {
10986                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
10987             } else {
10988                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
10989             }
10990             if (!target_data) {
10991                 unlock_user_struct(target_header, arg1, 0);
10992                 return -TARGET_EFAULT;
10993             }
10994 
10995             if (num == TARGET_NR_capset) {
10996                 for (i = 0; i < data_items; i++) {
10997                     data[i].effective = tswap32(target_data[i].effective);
10998                     data[i].permitted = tswap32(target_data[i].permitted);
10999                     data[i].inheritable = tswap32(target_data[i].inheritable);
11000                 }
11001             }
11002 
11003             dataptr = data;
11004         }
11005 
11006         if (num == TARGET_NR_capget) {
11007             ret = get_errno(capget(&header, dataptr));
11008         } else {
11009             ret = get_errno(capset(&header, dataptr));
11010         }
11011 
11012         /* The kernel always updates version for both capget and capset */
11013         target_header->version = tswap32(header.version);
11014         unlock_user_struct(target_header, arg1, 1);
11015 
11016         if (arg2) {
11017             if (num == TARGET_NR_capget) {
11018                 for (i = 0; i < data_items; i++) {
11019                     target_data[i].effective = tswap32(data[i].effective);
11020                     target_data[i].permitted = tswap32(data[i].permitted);
11021                     target_data[i].inheritable = tswap32(data[i].inheritable);
11022                 }
11023                 unlock_user(target_data, arg2, target_datalen);
11024             } else {
11025                 unlock_user(target_data, arg2, 0);
11026             }
11027         }
11028         return ret;
11029     }
11030     case TARGET_NR_sigaltstack:
11031         return do_sigaltstack(arg1, arg2, cpu_env);
11032 
11033 #ifdef CONFIG_SENDFILE
11034 #ifdef TARGET_NR_sendfile
11035     case TARGET_NR_sendfile:
11036     {
11037         off_t *offp = NULL;
11038         off_t off;
11039         if (arg3) {
11040             ret = get_user_sal(off, arg3);
11041             if (is_error(ret)) {
11042                 return ret;
11043             }
11044             offp = &off;
11045         }
11046         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11047         if (!is_error(ret) && arg3) {
11048             abi_long ret2 = put_user_sal(off, arg3);
11049             if (is_error(ret2)) {
11050                 ret = ret2;
11051             }
11052         }
11053         return ret;
11054     }
11055 #endif
11056 #ifdef TARGET_NR_sendfile64
11057     case TARGET_NR_sendfile64:
11058     {
11059         off_t *offp = NULL;
11060         off_t off;
11061         if (arg3) {
11062             ret = get_user_s64(off, arg3);
11063             if (is_error(ret)) {
11064                 return ret;
11065             }
11066             offp = &off;
11067         }
11068         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11069         if (!is_error(ret) && arg3) {
11070             abi_long ret2 = put_user_s64(off, arg3);
11071             if (is_error(ret2)) {
11072                 ret = ret2;
11073             }
11074         }
11075         return ret;
11076     }
11077 #endif
11078 #endif
11079 #ifdef TARGET_NR_vfork
11080     case TARGET_NR_vfork:
11081         return get_errno(do_fork(cpu_env,
11082                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11083                          0, 0, 0, 0));
11084 #endif
11085 #ifdef TARGET_NR_ugetrlimit
11086     case TARGET_NR_ugetrlimit:
11087     {
11088         struct rlimit rlim;
11089         int resource = target_to_host_resource(arg1);
11090         ret = get_errno(getrlimit(resource, &rlim));
11091         if (!is_error(ret)) {
11092             struct target_rlimit *target_rlim;
11093             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11094                 return -TARGET_EFAULT;
11095             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11096             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11097             unlock_user_struct(target_rlim, arg2, 1);
11098         }
11099         return ret;
11100     }
11101 #endif
11102 #ifdef TARGET_NR_truncate64
11103     case TARGET_NR_truncate64:
11104         if (!(p = lock_user_string(arg1)))
11105             return -TARGET_EFAULT;
11106         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11107         unlock_user(p, arg1, 0);
11108         return ret;
11109 #endif
11110 #ifdef TARGET_NR_ftruncate64
11111     case TARGET_NR_ftruncate64:
11112         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11113 #endif
11114 #ifdef TARGET_NR_stat64
11115     case TARGET_NR_stat64:
11116         if (!(p = lock_user_string(arg1))) {
11117             return -TARGET_EFAULT;
11118         }
11119         ret = get_errno(stat(path(p), &st));
11120         unlock_user(p, arg1, 0);
11121         if (!is_error(ret))
11122             ret = host_to_target_stat64(cpu_env, arg2, &st);
11123         return ret;
11124 #endif
11125 #ifdef TARGET_NR_lstat64
11126     case TARGET_NR_lstat64:
11127         if (!(p = lock_user_string(arg1))) {
11128             return -TARGET_EFAULT;
11129         }
11130         ret = get_errno(lstat(path(p), &st));
11131         unlock_user(p, arg1, 0);
11132         if (!is_error(ret))
11133             ret = host_to_target_stat64(cpu_env, arg2, &st);
11134         return ret;
11135 #endif
11136 #ifdef TARGET_NR_fstat64
11137     case TARGET_NR_fstat64:
11138         ret = get_errno(fstat(arg1, &st));
11139         if (!is_error(ret))
11140             ret = host_to_target_stat64(cpu_env, arg2, &st);
11141         return ret;
11142 #endif
11143 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11144 #ifdef TARGET_NR_fstatat64
11145     case TARGET_NR_fstatat64:
11146 #endif
11147 #ifdef TARGET_NR_newfstatat
11148     case TARGET_NR_newfstatat:
11149 #endif
11150         if (!(p = lock_user_string(arg2))) {
11151             return -TARGET_EFAULT;
11152         }
11153         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11154         unlock_user(p, arg2, 0);
11155         if (!is_error(ret))
11156             ret = host_to_target_stat64(cpu_env, arg3, &st);
11157         return ret;
11158 #endif
11159 #if defined(TARGET_NR_statx)
11160     case TARGET_NR_statx:
11161         {
11162             struct target_statx *target_stx;
11163             int dirfd = arg1;
11164             int flags = arg3;
11165 
11166             p = lock_user_string(arg2);
11167             if (p == NULL) {
11168                 return -TARGET_EFAULT;
11169             }
11170 #if defined(__NR_statx)
11171             {
11172                 /*
11173                  * It is assumed that struct statx is architecture independent.
11174                  */
11175                 struct target_statx host_stx;
11176                 int mask = arg4;
11177 
11178                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11179                 if (!is_error(ret)) {
11180                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11181                         unlock_user(p, arg2, 0);
11182                         return -TARGET_EFAULT;
11183                     }
11184                 }
11185 
11186                 if (ret != -TARGET_ENOSYS) {
11187                     unlock_user(p, arg2, 0);
11188                     return ret;
11189                 }
11190             }
11191 #endif
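                  /*
                   * Editor's note (added for clarity): if the host lacks a
                   * statx() syscall, or it returned ENOSYS, fall back to
                   * fstatat() and synthesise the target_statx fields from
                   * struct stat below; nanosecond timestamps and the
                   * statx-only fields are simply left zero by the memset().
                   */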
11192             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11193             unlock_user(p, arg2, 0);
11194 
11195             if (!is_error(ret)) {
11196                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11197                     return -TARGET_EFAULT;
11198                 }
11199                 memset(target_stx, 0, sizeof(*target_stx));
11200                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11201                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11202                 __put_user(st.st_ino, &target_stx->stx_ino);
11203                 __put_user(st.st_mode, &target_stx->stx_mode);
11204                 __put_user(st.st_uid, &target_stx->stx_uid);
11205                 __put_user(st.st_gid, &target_stx->stx_gid);
11206                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11207                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11208                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11209                 __put_user(st.st_size, &target_stx->stx_size);
11210                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11211                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11212                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11213                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11214                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11215                 unlock_user_struct(target_stx, arg5, 1);
11216             }
11217         }
11218         return ret;
11219 #endif
11220 #ifdef TARGET_NR_lchown
11221     case TARGET_NR_lchown:
11222         if (!(p = lock_user_string(arg1)))
11223             return -TARGET_EFAULT;
11224         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11225         unlock_user(p, arg1, 0);
11226         return ret;
11227 #endif
11228 #ifdef TARGET_NR_getuid
11229     case TARGET_NR_getuid:
11230         return get_errno(high2lowuid(getuid()));
11231 #endif
11232 #ifdef TARGET_NR_getgid
11233     case TARGET_NR_getgid:
11234         return get_errno(high2lowgid(getgid()));
11235 #endif
11236 #ifdef TARGET_NR_geteuid
11237     case TARGET_NR_geteuid:
11238         return get_errno(high2lowuid(geteuid()));
11239 #endif
11240 #ifdef TARGET_NR_getegid
11241     case TARGET_NR_getegid:
11242         return get_errno(high2lowgid(getegid()));
11243 #endif
11244     case TARGET_NR_setreuid:
11245         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11246     case TARGET_NR_setregid:
11247         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11248     case TARGET_NR_getgroups:
11249         {
11250             int gidsetsize = arg1;
11251             target_id *target_grouplist;
11252             gid_t *grouplist;
11253             int i;
11254 
11255             grouplist = alloca(gidsetsize * sizeof(gid_t));
11256             ret = get_errno(getgroups(gidsetsize, grouplist));
11257             if (gidsetsize == 0)
11258                 return ret;
11259             if (!is_error(ret)) {
11260                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11261                 if (!target_grouplist)
11262                     return -TARGET_EFAULT;
11263                 for (i = 0; i < ret; i++)
11264                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11265                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11266             }
11267         }
11268         return ret;
11269     case TARGET_NR_setgroups:
11270         {
11271             int gidsetsize = arg1;
11272             target_id *target_grouplist;
11273             gid_t *grouplist = NULL;
11274             int i;
11275             if (gidsetsize) {
11276                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11277                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11278                 if (!target_grouplist) {
11279                     return -TARGET_EFAULT;
11280                 }
11281                 for (i = 0; i < gidsetsize; i++) {
11282                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11283                 }
11284                 unlock_user(target_grouplist, arg2, 0);
11285             }
11286             return get_errno(setgroups(gidsetsize, grouplist));
11287         }
11288     case TARGET_NR_fchown:
11289         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11290 #if defined(TARGET_NR_fchownat)
11291     case TARGET_NR_fchownat:
11292         if (!(p = lock_user_string(arg2)))
11293             return -TARGET_EFAULT;
11294         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11295                                  low2highgid(arg4), arg5));
11296         unlock_user(p, arg2, 0);
11297         return ret;
11298 #endif
11299 #ifdef TARGET_NR_setresuid
11300     case TARGET_NR_setresuid:
11301         return get_errno(sys_setresuid(low2highuid(arg1),
11302                                        low2highuid(arg2),
11303                                        low2highuid(arg3)));
11304 #endif
11305 #ifdef TARGET_NR_getresuid
11306     case TARGET_NR_getresuid:
11307         {
11308             uid_t ruid, euid, suid;
11309             ret = get_errno(getresuid(&ruid, &euid, &suid));
11310             if (!is_error(ret)) {
11311                 if (put_user_id(high2lowuid(ruid), arg1)
11312                     || put_user_id(high2lowuid(euid), arg2)
11313                     || put_user_id(high2lowuid(suid), arg3))
11314                     return -TARGET_EFAULT;
11315             }
11316         }
11317         return ret;
11318 #endif
11319 #ifdef TARGET_NR_setresgid
11320     case TARGET_NR_setresgid:
11321         return get_errno(sys_setresgid(low2highgid(arg1),
11322                                        low2highgid(arg2),
11323                                        low2highgid(arg3)));
11324 #endif
11325 #ifdef TARGET_NR_getresgid
11326     case TARGET_NR_getresgid:
11327         {
11328             gid_t rgid, egid, sgid;
11329             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11330             if (!is_error(ret)) {
11331                 if (put_user_id(high2lowgid(rgid), arg1)
11332                     || put_user_id(high2lowgid(egid), arg2)
11333                     || put_user_id(high2lowgid(sgid), arg3))
11334                     return -TARGET_EFAULT;
11335             }
11336         }
11337         return ret;
11338 #endif
11339 #ifdef TARGET_NR_chown
11340     case TARGET_NR_chown:
11341         if (!(p = lock_user_string(arg1)))
11342             return -TARGET_EFAULT;
11343         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11344         unlock_user(p, arg1, 0);
11345         return ret;
11346 #endif
11347     case TARGET_NR_setuid:
11348         return get_errno(sys_setuid(low2highuid(arg1)));
11349     case TARGET_NR_setgid:
11350         return get_errno(sys_setgid(low2highgid(arg1)));
11351     case TARGET_NR_setfsuid:
11352         return get_errno(setfsuid(arg1));
11353     case TARGET_NR_setfsgid:
11354         return get_errno(setfsgid(arg1));
11355 
11356 #ifdef TARGET_NR_lchown32
11357     case TARGET_NR_lchown32:
11358         if (!(p = lock_user_string(arg1)))
11359             return -TARGET_EFAULT;
11360         ret = get_errno(lchown(p, arg2, arg3));
11361         unlock_user(p, arg1, 0);
11362         return ret;
11363 #endif
11364 #ifdef TARGET_NR_getuid32
11365     case TARGET_NR_getuid32:
11366         return get_errno(getuid());
11367 #endif
11368 
11369 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11370     /* Alpha specific */
11371     case TARGET_NR_getxuid:
11372         {
11373             uid_t euid;
11374             euid = geteuid();
11375             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
11376         }
11377         return get_errno(getuid());
11378 #endif
11379 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11380     /* Alpha specific */
11381     case TARGET_NR_getxgid:
11382         {
11383             gid_t egid;
11384             egid = getegid();
11385             ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
11386         }
11387         return get_errno(getgid());
11388 #endif
11389 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11390     /* Alpha specific */
11391     case TARGET_NR_osf_getsysinfo:
11392         ret = -TARGET_EOPNOTSUPP;
11393         switch (arg1) {
11394           case TARGET_GSI_IEEE_FP_CONTROL:
11395             {
11396                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11397                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11398 
11399                 swcr &= ~SWCR_STATUS_MASK;
11400                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11401 
11402                 if (put_user_u64(swcr, arg2))
11403                     return -TARGET_EFAULT;
11404                 ret = 0;
11405             }
11406             break;
11407 
11408           /* case GSI_IEEE_STATE_AT_SIGNAL:
11409              -- Not implemented in linux kernel.
11410              case GSI_UACPROC:
11411              -- Retrieves current unaligned access state; not much used.
11412              case GSI_PROC_TYPE:
11413              -- Retrieves implver information; surely not used.
11414              case GSI_GET_HWRPB:
11415              -- Grabs a copy of the HWRPB; surely not used.
11416           */
11417         }
11418         return ret;
11419 #endif
11420 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11421     /* Alpha specific */
11422     case TARGET_NR_osf_setsysinfo:
11423         ret = -TARGET_EOPNOTSUPP;
11424         switch (arg1) {
11425           case TARGET_SSI_IEEE_FP_CONTROL:
11426             {
11427                 uint64_t swcr, fpcr;
11428 
11429                 if (get_user_u64 (swcr, arg2)) {
11430                     return -TARGET_EFAULT;
11431                 }
11432 
11433                 /*
11434                  * The kernel calls swcr_update_status to update the
11435                  * status bits from the fpcr at every point that it
11436                  * could be queried.  Therefore, we store the status
11437                  * bits only in FPCR.
11438                  */
11439                 ((CPUAlphaState *)cpu_env)->swcr
11440                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11441 
11442                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11443                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11444                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11445                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11446                 ret = 0;
11447             }
11448             break;
11449 
11450           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11451             {
11452                 uint64_t exc, fpcr, fex;
11453 
11454                 if (get_user_u64(exc, arg2)) {
11455                     return -TARGET_EFAULT;
11456                 }
11457                 exc &= SWCR_STATUS_MASK;
11458                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11459 
11460                 /* Old exceptions are not signaled.  */
11461                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11462                 fex = exc & ~fex;
11463                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11464                 fex &= ((CPUArchState *)cpu_env)->swcr;
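                      /*
                       * Editor's note (added for clarity): fex now holds only
                       * the newly-raised exception bits whose traps are enabled
                       * in the guest's software completion mask, so only those
                       * produce the SIGFPE queued below.
                       */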
11465 
11466                 /* Update the hardware fpcr.  */
11467                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11468                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11469 
11470                 if (fex) {
11471                     int si_code = TARGET_FPE_FLTUNK;
11472                     target_siginfo_t info;
11473 
11474                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11475                         si_code = TARGET_FPE_FLTUND;
11476                     }
11477                     if (fex & SWCR_TRAP_ENABLE_INE) {
11478                         si_code = TARGET_FPE_FLTRES;
11479                     }
11480                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11481                         si_code = TARGET_FPE_FLTUND;
11482                     }
11483                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11484                         si_code = TARGET_FPE_FLTOVF;
11485                     }
11486                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11487                         si_code = TARGET_FPE_FLTDIV;
11488                     }
11489                     if (fex & SWCR_TRAP_ENABLE_INV) {
11490                         si_code = TARGET_FPE_FLTINV;
11491                     }
11492 
11493                     info.si_signo = SIGFPE;
11494                     info.si_errno = 0;
11495                     info.si_code = si_code;
11496                     info._sifields._sigfault._addr
11497                         = ((CPUArchState *)cpu_env)->pc;
11498                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11499                                  QEMU_SI_FAULT, &info);
11500                 }
11501                 ret = 0;
11502             }
11503             break;
11504 
11505           /* case SSI_NVPAIRS:
11506              -- Used with SSIN_UACPROC to enable unaligned accesses.
11507              case SSI_IEEE_STATE_AT_SIGNAL:
11508              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11509              -- Not implemented in linux kernel
11510           */
11511         }
11512         return ret;
11513 #endif
11514 #ifdef TARGET_NR_osf_sigprocmask
11515     /* Alpha specific.  */
11516     case TARGET_NR_osf_sigprocmask:
11517         {
11518             abi_ulong mask;
11519             int how;
11520             sigset_t set, oldset;
11521 
11522             switch (arg1) {
11523             case TARGET_SIG_BLOCK:
11524                 how = SIG_BLOCK;
11525                 break;
11526             case TARGET_SIG_UNBLOCK:
11527                 how = SIG_UNBLOCK;
11528                 break;
11529             case TARGET_SIG_SETMASK:
11530                 how = SIG_SETMASK;
11531                 break;
11532             default:
11533                 return -TARGET_EINVAL;
11534             }
11535             mask = arg2;
11536             target_to_host_old_sigset(&set, &mask);
11537             ret = do_sigprocmask(how, &set, &oldset);
11538             if (!ret) {
11539                 host_to_target_old_sigset(&mask, &oldset);
11540                 ret = mask;
11541             }
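                  /*
                   * Editor's note (added for clarity): unlike sigprocmask(2),
                   * the OSF/1 variant returns the previous mask in the syscall
                   * result register rather than through a user pointer, hence
                   * ret is overwritten with the converted old set on success.
                   */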
11542         }
11543         return ret;
11544 #endif
11545 
11546 #ifdef TARGET_NR_getgid32
11547     case TARGET_NR_getgid32:
11548         return get_errno(getgid());
11549 #endif
11550 #ifdef TARGET_NR_geteuid32
11551     case TARGET_NR_geteuid32:
11552         return get_errno(geteuid());
11553 #endif
11554 #ifdef TARGET_NR_getegid32
11555     case TARGET_NR_getegid32:
11556         return get_errno(getegid());
11557 #endif
11558 #ifdef TARGET_NR_setreuid32
11559     case TARGET_NR_setreuid32:
11560         return get_errno(setreuid(arg1, arg2));
11561 #endif
11562 #ifdef TARGET_NR_setregid32
11563     case TARGET_NR_setregid32:
11564         return get_errno(setregid(arg1, arg2));
11565 #endif
11566 #ifdef TARGET_NR_getgroups32
11567     case TARGET_NR_getgroups32:
11568         {
11569             int gidsetsize = arg1;
11570             uint32_t *target_grouplist;
11571             gid_t *grouplist;
11572             int i;
11573 
11574             grouplist = alloca(gidsetsize * sizeof(gid_t));
11575             ret = get_errno(getgroups(gidsetsize, grouplist));
11576             if (gidsetsize == 0)
11577                 return ret;
11578             if (!is_error(ret)) {
11579                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11580                 if (!target_grouplist) {
11581                     return -TARGET_EFAULT;
11582                 }
11583                 for (i = 0; i < ret; i++)
11584                     target_grouplist[i] = tswap32(grouplist[i]);
11585                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11586             }
11587         }
11588         return ret;
11589 #endif
11590 #ifdef TARGET_NR_setgroups32
11591     case TARGET_NR_setgroups32:
11592         {
11593             int gidsetsize = arg1;
11594             uint32_t *target_grouplist;
11595             gid_t *grouplist;
11596             int i;
11597 
11598             grouplist = alloca(gidsetsize * sizeof(gid_t));
11599             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11600             if (!target_grouplist) {
11601                 return -TARGET_EFAULT;
11602             }
11603             for (i = 0; i < gidsetsize; i++)
11604                 grouplist[i] = tswap32(target_grouplist[i]);
11605             unlock_user(target_grouplist, arg2, 0);
11606             return get_errno(setgroups(gidsetsize, grouplist));
11607         }
11608 #endif
11609 #ifdef TARGET_NR_fchown32
11610     case TARGET_NR_fchown32:
11611         return get_errno(fchown(arg1, arg2, arg3));
11612 #endif
11613 #ifdef TARGET_NR_setresuid32
11614     case TARGET_NR_setresuid32:
11615         return get_errno(sys_setresuid(arg1, arg2, arg3));
11616 #endif
11617 #ifdef TARGET_NR_getresuid32
11618     case TARGET_NR_getresuid32:
11619         {
11620             uid_t ruid, euid, suid;
11621             ret = get_errno(getresuid(&ruid, &euid, &suid));
11622             if (!is_error(ret)) {
11623                 if (put_user_u32(ruid, arg1)
11624                     || put_user_u32(euid, arg2)
11625                     || put_user_u32(suid, arg3))
11626                     return -TARGET_EFAULT;
11627             }
11628         }
11629         return ret;
11630 #endif
11631 #ifdef TARGET_NR_setresgid32
11632     case TARGET_NR_setresgid32:
11633         return get_errno(sys_setresgid(arg1, arg2, arg3));
11634 #endif
11635 #ifdef TARGET_NR_getresgid32
11636     case TARGET_NR_getresgid32:
11637         {
11638             gid_t rgid, egid, sgid;
11639             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11640             if (!is_error(ret)) {
11641                 if (put_user_u32(rgid, arg1)
11642                     || put_user_u32(egid, arg2)
11643                     || put_user_u32(sgid, arg3))
11644                     return -TARGET_EFAULT;
11645             }
11646         }
11647         return ret;
11648 #endif
11649 #ifdef TARGET_NR_chown32
11650     case TARGET_NR_chown32:
11651         if (!(p = lock_user_string(arg1)))
11652             return -TARGET_EFAULT;
11653         ret = get_errno(chown(p, arg2, arg3));
11654         unlock_user(p, arg1, 0);
11655         return ret;
11656 #endif
11657 #ifdef TARGET_NR_setuid32
11658     case TARGET_NR_setuid32:
11659         return get_errno(sys_setuid(arg1));
11660 #endif
11661 #ifdef TARGET_NR_setgid32
11662     case TARGET_NR_setgid32:
11663         return get_errno(sys_setgid(arg1));
11664 #endif
11665 #ifdef TARGET_NR_setfsuid32
11666     case TARGET_NR_setfsuid32:
11667         return get_errno(setfsuid(arg1));
11668 #endif
11669 #ifdef TARGET_NR_setfsgid32
11670     case TARGET_NR_setfsgid32:
11671         return get_errno(setfsgid(arg1));
11672 #endif
11673 #ifdef TARGET_NR_mincore
11674     case TARGET_NR_mincore:
11675         {
11676             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11677             if (!a) {
11678                 return -TARGET_ENOMEM;
11679             }
11680             p = lock_user_string(arg3);
11681             if (!p) {
11682                 ret = -TARGET_EFAULT;
11683             } else {
11684                 ret = get_errno(mincore(a, arg2, p));
11685                 unlock_user(p, arg3, ret);
11686             }
11687             unlock_user(a, arg1, 0);
11688         }
11689         return ret;
11690 #endif
11691 #ifdef TARGET_NR_arm_fadvise64_64
11692     case TARGET_NR_arm_fadvise64_64:
11693         /* arm_fadvise64_64 looks like fadvise64_64 but
11694          * with different argument order: fd, advice, offset, len
11695          * rather than the usual fd, offset, len, advice.
11696          * Note that offset and len are both 64-bit so appear as
11697          * pairs of 32-bit registers.
11698          */
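              /*
               * Editor's note (added for clarity): target_offset64() recombines
               * the two 32-bit registers into one 64-bit offset, choosing which
               * register carries the high word from the target's endianness;
               * e.g. a high word of 0x1 and a low word of 0x2000 give the
               * offset 0x100002000.
               */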
11699         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11700                             target_offset64(arg5, arg6), arg2);
11701         return -host_to_target_errno(ret);
11702 #endif
11703 
11704 #if TARGET_ABI_BITS == 32
11705 
11706 #ifdef TARGET_NR_fadvise64_64
11707     case TARGET_NR_fadvise64_64:
11708 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11709         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11710         ret = arg2;
11711         arg2 = arg3;
11712         arg3 = arg4;
11713         arg4 = arg5;
11714         arg5 = arg6;
11715         arg6 = ret;
11716 #else
11717         /* 6 args: fd, offset (high, low), len (high, low), advice */
11718         if (regpairs_aligned(cpu_env, num)) {
11719             /* offset is in (3,4), len in (5,6) and advice in 7 */
11720             arg2 = arg3;
11721             arg3 = arg4;
11722             arg4 = arg5;
11723             arg5 = arg6;
11724             arg6 = arg7;
11725         }
11726 #endif
11727         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11728                             target_offset64(arg4, arg5), arg6);
11729         return -host_to_target_errno(ret);
11730 #endif
11731 
11732 #ifdef TARGET_NR_fadvise64
11733     case TARGET_NR_fadvise64:
11734         /* 5 args: fd, offset (high, low), len, advice */
11735         if (regpairs_aligned(cpu_env, num)) {
11736             /* offset is in (3,4), len in 5 and advice in 6 */
11737             arg2 = arg3;
11738             arg3 = arg4;
11739             arg4 = arg5;
11740             arg5 = arg6;
11741         }
11742         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11743         return -host_to_target_errno(ret);
11744 #endif
11745 
11746 #else /* not a 32-bit ABI */
11747 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11748 #ifdef TARGET_NR_fadvise64_64
11749     case TARGET_NR_fadvise64_64:
11750 #endif
11751 #ifdef TARGET_NR_fadvise64
11752     case TARGET_NR_fadvise64:
11753 #endif
11754 #ifdef TARGET_S390X
11755         switch (arg4) {
11756         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11757         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11758         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11759         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11760         default: break;
11761         }
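              /*
               * Editor's note (added for clarity): the s390 ABI defines
               * POSIX_FADV_DONTNEED and POSIX_FADV_NOREUSE as 6 and 7 rather
               * than the generic 4 and 5, so guest advice values are remapped
               * above before being passed to the host posix_fadvise(); the
               * generic 4 and 5 coming from such a guest are deliberately
               * turned into invalid values.
               */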
11762 #endif
11763         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11764 #endif
11765 #endif /* end of 64-bit ABI fadvise handling */
11766 
11767 #ifdef TARGET_NR_madvise
11768     case TARGET_NR_madvise:
11769         /* A straight passthrough may not be safe because qemu sometimes
11770            turns private file-backed mappings into anonymous mappings.
11771            This will break MADV_DONTNEED.
11772            This is a hint, so ignoring and returning success is ok.  */
11773         return 0;
11774 #endif
11775 #ifdef TARGET_NR_fcntl64
11776     case TARGET_NR_fcntl64:
11777     {
11778         int cmd;
11779         struct flock64 fl;
11780         from_flock64_fn *copyfrom = copy_from_user_flock64;
11781         to_flock64_fn *copyto = copy_to_user_flock64;
11782 
11783 #ifdef TARGET_ARM
11784         if (!((CPUARMState *)cpu_env)->eabi) {
11785             copyfrom = copy_from_user_oabi_flock64;
11786             copyto = copy_to_user_oabi_flock64;
11787         }
11788 #endif
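              /*
               * Editor's note (assumption, added for clarity): the legacy ARM
               * OABI lays out struct flock64 with only 4-byte alignment, so its
               * 64-bit fields sit at different offsets than in the EABI layout;
               * that is why a separate pair of copy helpers is selected above.
               */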
11789 
11790         cmd = target_to_host_fcntl_cmd(arg2);
11791         if (cmd == -TARGET_EINVAL) {
11792             return cmd;
11793         }
11794 
11795         switch (arg2) {
11796         case TARGET_F_GETLK64:
11797             ret = copyfrom(&fl, arg3);
11798             if (ret) {
11799                 break;
11800             }
11801             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11802             if (ret == 0) {
11803                 ret = copyto(arg3, &fl);
11804             }
11805             break;
11806 
11807         case TARGET_F_SETLK64:
11808         case TARGET_F_SETLKW64:
11809             ret = copyfrom(&fl, arg3);
11810             if (ret) {
11811                 break;
11812             }
11813             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11814             break;
11815         default:
11816             ret = do_fcntl(arg1, arg2, arg3);
11817             break;
11818         }
11819         return ret;
11820     }
11821 #endif
11822 #ifdef TARGET_NR_cacheflush
11823     case TARGET_NR_cacheflush:
11824         /* self-modifying code is handled automatically, so nothing needed */
11825         return 0;
11826 #endif
11827 #ifdef TARGET_NR_getpagesize
11828     case TARGET_NR_getpagesize:
11829         return TARGET_PAGE_SIZE;
11830 #endif
11831     case TARGET_NR_gettid:
11832         return get_errno(sys_gettid());
11833 #ifdef TARGET_NR_readahead
11834     case TARGET_NR_readahead:
11835 #if TARGET_ABI_BITS == 32
11836         if (regpairs_aligned(cpu_env, num)) {
11837             arg2 = arg3;
11838             arg3 = arg4;
11839             arg4 = arg5;
11840         }
11841         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3), arg4));
11842 #else
11843         ret = get_errno(readahead(arg1, arg2, arg3));
11844 #endif
11845         return ret;
11846 #endif
11847 #ifdef CONFIG_ATTR
11848 #ifdef TARGET_NR_setxattr
11849     case TARGET_NR_listxattr:
11850     case TARGET_NR_llistxattr:
11851     {
11852         void *p, *b = 0;
11853         if (arg2) {
11854             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11855             if (!b) {
11856                 return -TARGET_EFAULT;
11857             }
11858         }
11859         p = lock_user_string(arg1);
11860         if (p) {
11861             if (num == TARGET_NR_listxattr) {
11862                 ret = get_errno(listxattr(p, b, arg3));
11863             } else {
11864                 ret = get_errno(llistxattr(p, b, arg3));
11865             }
11866         } else {
11867             ret = -TARGET_EFAULT;
11868         }
11869         unlock_user(p, arg1, 0);
11870         unlock_user(b, arg2, arg3);
11871         return ret;
11872     }
11873     case TARGET_NR_flistxattr:
11874     {
11875         void *b = 0;
11876         if (arg2) {
11877             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11878             if (!b) {
11879                 return -TARGET_EFAULT;
11880             }
11881         }
11882         ret = get_errno(flistxattr(arg1, b, arg3));
11883         unlock_user(b, arg2, arg3);
11884         return ret;
11885     }
11886     case TARGET_NR_setxattr:
11887     case TARGET_NR_lsetxattr:
11888         {
11889             void *p, *n, *v = 0;
11890             if (arg3) {
11891                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11892                 if (!v) {
11893                     return -TARGET_EFAULT;
11894                 }
11895             }
11896             p = lock_user_string(arg1);
11897             n = lock_user_string(arg2);
11898             if (p && n) {
11899                 if (num == TARGET_NR_setxattr) {
11900                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11901                 } else {
11902                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11903                 }
11904             } else {
11905                 ret = -TARGET_EFAULT;
11906             }
11907             unlock_user(p, arg1, 0);
11908             unlock_user(n, arg2, 0);
11909             unlock_user(v, arg3, 0);
11910         }
11911         return ret;
11912     case TARGET_NR_fsetxattr:
11913         {
11914             void *n, *v = 0;
11915             if (arg3) {
11916                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11917                 if (!v) {
11918                     return -TARGET_EFAULT;
11919                 }
11920             }
11921             n = lock_user_string(arg2);
11922             if (n) {
11923                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11924             } else {
11925                 ret = -TARGET_EFAULT;
11926             }
11927             unlock_user(n, arg2, 0);
11928             unlock_user(v, arg3, 0);
11929         }
11930         return ret;
11931     case TARGET_NR_getxattr:
11932     case TARGET_NR_lgetxattr:
11933         {
11934             void *p, *n, *v = 0;
11935             if (arg3) {
11936                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11937                 if (!v) {
11938                     return -TARGET_EFAULT;
11939                 }
11940             }
11941             p = lock_user_string(arg1);
11942             n = lock_user_string(arg2);
11943             if (p && n) {
11944                 if (num == TARGET_NR_getxattr) {
11945                     ret = get_errno(getxattr(p, n, v, arg4));
11946                 } else {
11947                     ret = get_errno(lgetxattr(p, n, v, arg4));
11948                 }
11949             } else {
11950                 ret = -TARGET_EFAULT;
11951             }
11952             unlock_user(p, arg1, 0);
11953             unlock_user(n, arg2, 0);
11954             unlock_user(v, arg3, arg4);
11955         }
11956         return ret;
11957     case TARGET_NR_fgetxattr:
11958         {
11959             void *n, *v = 0;
11960             if (arg3) {
11961                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11962                 if (!v) {
11963                     return -TARGET_EFAULT;
11964                 }
11965             }
11966             n = lock_user_string(arg2);
11967             if (n) {
11968                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
11969             } else {
11970                 ret = -TARGET_EFAULT;
11971             }
11972             unlock_user(n, arg2, 0);
11973             unlock_user(v, arg3, arg4);
11974         }
11975         return ret;
11976     case TARGET_NR_removexattr:
11977     case TARGET_NR_lremovexattr:
11978         {
11979             void *p, *n;
11980             p = lock_user_string(arg1);
11981             n = lock_user_string(arg2);
11982             if (p && n) {
11983                 if (num == TARGET_NR_removexattr) {
11984                     ret = get_errno(removexattr(p, n));
11985                 } else {
11986                     ret = get_errno(lremovexattr(p, n));
11987                 }
11988             } else {
11989                 ret = -TARGET_EFAULT;
11990             }
11991             unlock_user(p, arg1, 0);
11992             unlock_user(n, arg2, 0);
11993         }
11994         return ret;
11995     case TARGET_NR_fremovexattr:
11996         {
11997             void *n;
11998             n = lock_user_string(arg2);
11999             if (n) {
12000                 ret = get_errno(fremovexattr(arg1, n));
12001             } else {
12002                 ret = -TARGET_EFAULT;
12003             }
12004             unlock_user(n, arg2, 0);
12005         }
12006         return ret;
12007 #endif
12008 #endif /* CONFIG_ATTR */
12009 #ifdef TARGET_NR_set_thread_area
12010     case TARGET_NR_set_thread_area:
12011 #if defined(TARGET_MIPS)
12012       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12013       return 0;
12014 #elif defined(TARGET_CRIS)
12015       if (arg1 & 0xff) {
12016           ret = -TARGET_EINVAL;
12017       } else {
12018           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12019           ret = 0;
12020       }
12021       return ret;
12022 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12023       return do_set_thread_area(cpu_env, arg1);
12024 #elif defined(TARGET_M68K)
12025       {
12026           TaskState *ts = cpu->opaque;
12027           ts->tp_value = arg1;
12028           return 0;
12029       }
12030 #else
12031       return -TARGET_ENOSYS;
12032 #endif
12033 #endif
12034 #ifdef TARGET_NR_get_thread_area
12035     case TARGET_NR_get_thread_area:
12036 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12037         return do_get_thread_area(cpu_env, arg1);
12038 #elif defined(TARGET_M68K)
12039         {
12040             TaskState *ts = cpu->opaque;
12041             return ts->tp_value;
12042         }
12043 #else
12044         return -TARGET_ENOSYS;
12045 #endif
12046 #endif
12047 #ifdef TARGET_NR_getdomainname
12048     case TARGET_NR_getdomainname:
12049         return -TARGET_ENOSYS;
12050 #endif
12051 
12052 #ifdef TARGET_NR_clock_settime
12053     case TARGET_NR_clock_settime:
12054     {
12055         struct timespec ts;
12056 
12057         ret = target_to_host_timespec(&ts, arg2);
12058         if (!is_error(ret)) {
12059             ret = get_errno(clock_settime(arg1, &ts));
12060         }
12061         return ret;
12062     }
12063 #endif
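    /*
     * The *_time64 variants below differ from the plain syscalls only in
     * the guest-side structure layout: the timespec64 conversion helpers
     * marshal a 64-bit time_t layout for 32-bit guests, while the host
     * call that is made is the same.
     */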
12064 #ifdef TARGET_NR_clock_settime64
12065     case TARGET_NR_clock_settime64:
12066     {
12067         struct timespec ts;
12068 
12069         ret = target_to_host_timespec64(&ts, arg2);
12070         if (!is_error(ret)) {
12071             ret = get_errno(clock_settime(arg1, &ts));
12072         }
12073         return ret;
12074     }
12075 #endif
12076 #ifdef TARGET_NR_clock_gettime
12077     case TARGET_NR_clock_gettime:
12078     {
12079         struct timespec ts;
12080         ret = get_errno(clock_gettime(arg1, &ts));
12081         if (!is_error(ret)) {
12082             ret = host_to_target_timespec(arg2, &ts);
12083         }
12084         return ret;
12085     }
12086 #endif
12087 #ifdef TARGET_NR_clock_gettime64
12088     case TARGET_NR_clock_gettime64:
12089     {
12090         struct timespec ts;
12091         ret = get_errno(clock_gettime(arg1, &ts));
12092         if (!is_error(ret)) {
12093             ret = host_to_target_timespec64(arg2, &ts);
12094         }
12095         return ret;
12096     }
12097 #endif
12098 #ifdef TARGET_NR_clock_getres
12099     case TARGET_NR_clock_getres:
12100     {
12101         struct timespec ts;
12102         ret = get_errno(clock_getres(arg1, &ts));
12103         if (!is_error(ret)) {
12104             host_to_target_timespec(arg2, &ts);
12105         }
12106         return ret;
12107     }
12108 #endif
12109 #ifdef TARGET_NR_clock_getres_time64
12110     case TARGET_NR_clock_getres_time64:
12111     {
12112         struct timespec ts;
12113         ret = get_errno(clock_getres(arg1, &ts));
12114         if (!is_error(ret)) {
12115             host_to_target_timespec64(arg2, &ts);
12116         }
12117         return ret;
12118     }
12119 #endif
12120 #ifdef TARGET_NR_clock_nanosleep
12121     case TARGET_NR_clock_nanosleep:
12122     {
12123         struct timespec ts;
12124         if (target_to_host_timespec(&ts, arg3)) {
12125             return -TARGET_EFAULT;
12126         }
12127         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12128                                              &ts, arg4 ? &ts : NULL));
12129         /*
12130          * If the call is interrupted by a signal handler, it fails with
12131          * -TARGET_EINTR; in that case, if arg4 is not NULL and arg2 is not
12132          * TIMER_ABSTIME, the remaining unslept time is returned in arg4.
12133          */
12134         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12135             host_to_target_timespec(arg4, &ts)) {
12136               return -TARGET_EFAULT;
12137         }
12138 
12139         return ret;
12140     }
12141 #endif
12142 #ifdef TARGET_NR_clock_nanosleep_time64
12143     case TARGET_NR_clock_nanosleep_time64:
12144     {
12145         struct timespec ts;
12146 
12147         if (target_to_host_timespec64(&ts, arg3)) {
12148             return -TARGET_EFAULT;
12149         }
12150 
12151         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12152                                              &ts, arg4 ? &ts : NULL));
12153 
12154         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12155             host_to_target_timespec64(arg4, &ts)) {
12156             return -TARGET_EFAULT;
12157         }
12158         return ret;
12159     }
12160 #endif
12161 
12162 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12163     case TARGET_NR_set_tid_address:
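        /*
         * The guest address is converted to a host pointer with g2h() so
         * that the host kernel clears the TID at that location when the
         * thread exits.
         */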
12164         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12165 #endif
12166 
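    /*
     * tkill/tgkill: the thread and thread-group ids are passed through
     * unchanged; only the signal number needs translating to the host
     * numbering.
     */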
12167     case TARGET_NR_tkill:
12168         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12169 
12170     case TARGET_NR_tgkill:
12171         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12172                          target_to_host_signal(arg3)));
12173 
12174 #ifdef TARGET_NR_set_robust_list
12175     case TARGET_NR_set_robust_list:
12176     case TARGET_NR_get_robust_list:
12177         /* The ABI for supporting robust futexes has userspace pass
12178          * the kernel a pointer to a linked list which is updated by
12179          * userspace after the syscall; the list is walked by the kernel
12180          * when the thread exits. Since the linked list in QEMU guest
12181          * memory isn't a valid linked list for the host and we have
12182          * no way to reliably intercept the thread-death event, we can't
12183          * support these. Silently return ENOSYS so that guest userspace
12184          * falls back to a non-robust futex implementation (which should
12185          * be OK except in the corner case of the guest crashing while
12186          * holding a mutex that is shared with another process via
12187          * shared memory).
12188          */
12189         return -TARGET_ENOSYS;
12190 #endif
12191 
12192 #if defined(TARGET_NR_utimensat)
12193     case TARGET_NR_utimensat:
12194         {
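            /*
             * arg3 points to two target timespecs (access and modification
             * time); a NULL pointer means "set both to the current time",
             * so only convert when the guest actually supplied times.
             */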
12195             struct timespec *tsp, ts[2];
12196             if (!arg3) {
12197                 tsp = NULL;
12198             } else {
12199                 if (target_to_host_timespec(ts, arg3)) {
12200                     return -TARGET_EFAULT;
12201                 }
12202                 if (target_to_host_timespec(ts + 1, arg3 +
12203                                             sizeof(struct target_timespec))) {
12204                     return -TARGET_EFAULT;
12205                 }
12206                 tsp = ts;
12207             }
12208             if (!arg2) {
12209                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12210             } else {
12211                 if (!(p = lock_user_string(arg2))) {
12212                     return -TARGET_EFAULT;
12213                 }
12214                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12215                 unlock_user(p, arg2, 0);
12216             }
12217         }
12218         return ret;
12219 #endif
12220 #ifdef TARGET_NR_utimensat_time64
12221     case TARGET_NR_utimensat_time64:
12222         {
12223             struct timespec *tsp, ts[2];
12224             if (!arg3) {
12225                 tsp = NULL;
12226             } else {
12227                 if (target_to_host_timespec64(ts, arg3)) {
12228                     return -TARGET_EFAULT;
12229                 }
12230                 if (target_to_host_timespec64(ts + 1, arg3 +
12231                                      sizeof(struct target__kernel_timespec))) {
12232                     return -TARGET_EFAULT;
12233                 }
12234                 tsp = ts;
12235             }
12236             if (!arg2) {
12237                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12238             } else {
12239                 p = lock_user_string(arg2);
12240                 if (!p) {
12241                     return -TARGET_EFAULT;
12242                 }
12243                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12244                 unlock_user(p, arg2, 0);
12245             }
12246         }
12247         return ret;
12248 #endif
12249 #ifdef TARGET_NR_futex
12250     case TARGET_NR_futex:
12251         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12252 #endif
12253 #ifdef TARGET_NR_futex_time64
12254     case TARGET_NR_futex_time64:
12255         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12256 #endif
12257 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
12258     case TARGET_NR_inotify_init:
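        /*
         * Register the new descriptor with an fd translator so that
         * inotify events read from it are converted to the guest layout.
         */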
12259         ret = get_errno(sys_inotify_init());
12260         if (ret >= 0) {
12261             fd_trans_register(ret, &target_inotify_trans);
12262         }
12263         return ret;
12264 #endif
12265 #ifdef CONFIG_INOTIFY1
12266 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
12267     case TARGET_NR_inotify_init1:
12268         ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
12269                                           fcntl_flags_tbl)));
12270         if (ret >= 0) {
12271             fd_trans_register(ret, &target_inotify_trans);
12272         }
12273         return ret;
12274 #endif
12275 #endif
12276 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
12277     case TARGET_NR_inotify_add_watch:
12278         p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
12279         ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
12280         unlock_user(p, arg2, 0);
12281         return ret;
12282 #endif
12283 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
12284     case TARGET_NR_inotify_rm_watch:
12285         return get_errno(sys_inotify_rm_watch(arg1, arg2));
12286 #endif
12287 
12288 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12289     case TARGET_NR_mq_open:
12290         {
12291             struct mq_attr posix_mq_attr;
12292             struct mq_attr *pposix_mq_attr;
12293             int host_flags;
12294 
12295             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12296             pposix_mq_attr = NULL;
12297             if (arg4) {
12298                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12299                     return -TARGET_EFAULT;
12300                 }
12301                 pposix_mq_attr = &posix_mq_attr;
12302             }
12303             p = lock_user_string(arg1 - 1);
12304             if (!p) {
12305                 return -TARGET_EFAULT;
12306             }
12307             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12308             unlock_user(p, arg1, 0);
12309         }
12310         return ret;
12311 
12312     case TARGET_NR_mq_unlink:
12313         p = lock_user_string(arg1 - 1);
12314         if (!p) {
12315             return -TARGET_EFAULT;
12316         }
12317         ret = get_errno(mq_unlink(p));
12318         unlock_user(p, arg1, 0);
12319         return ret;
12320 
12321 #ifdef TARGET_NR_mq_timedsend
12322     case TARGET_NR_mq_timedsend:
12323         {
12324             struct timespec ts;
12325 
12326             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12327             if (arg5 != 0) {
12328                 if (target_to_host_timespec(&ts, arg5)) {
12329                     return -TARGET_EFAULT;
12330                 }
12331                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12332                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12333                     return -TARGET_EFAULT;
12334                 }
12335             } else {
12336                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12337             }
12338             unlock_user(p, arg2, arg3);
12339         }
12340         return ret;
12341 #endif
12342 #ifdef TARGET_NR_mq_timedsend_time64
12343     case TARGET_NR_mq_timedsend_time64:
12344         {
12345             struct timespec ts;
12346 
12347             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12348             if (arg5 != 0) {
12349                 if (target_to_host_timespec64(&ts, arg5)) {
12350                     return -TARGET_EFAULT;
12351                 }
12352                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12353                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12354                     return -TARGET_EFAULT;
12355                 }
12356             } else {
12357                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12358             }
12359             unlock_user(p, arg2, arg3);
12360         }
12361         return ret;
12362 #endif
12363 
12364 #ifdef TARGET_NR_mq_timedreceive
12365     case TARGET_NR_mq_timedreceive:
12366         {
12367             struct timespec ts;
12368             unsigned int prio;
12369 
12370             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12371             if (arg5 != 0) {
12372                 if (target_to_host_timespec(&ts, arg5)) {
12373                     return -TARGET_EFAULT;
12374                 }
12375                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12376                                                      &prio, &ts));
12377                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12378                     return -TARGET_EFAULT;
12379                 }
12380             } else {
12381                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12382                                                      &prio, NULL));
12383             }
12384             unlock_user(p, arg2, arg3);
12385             if (arg4 != 0) {
12386                 put_user_u32(prio, arg4);
            }
12387         }
12388         return ret;
12389 #endif
12390 #ifdef TARGET_NR_mq_timedreceive_time64
12391     case TARGET_NR_mq_timedreceive_time64:
12392         {
12393             struct timespec ts;
12394             unsigned int prio;
12395 
12396             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12397             if (arg5 != 0) {
12398                 if (target_to_host_timespec64(&ts, arg5)) {
12399                     return -TARGET_EFAULT;
12400                 }
12401                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12402                                                      &prio, &ts));
12403                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12404                     return -TARGET_EFAULT;
12405                 }
12406             } else {
12407                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12408                                                      &prio, NULL));
12409             }
12410             unlock_user(p, arg2, arg3);
12411             if (arg4 != 0) {
12412                 put_user_u32(prio, arg4);
12413             }
12414         }
12415         return ret;
12416 #endif
12417 
12418     /* Not implemented for now... */
12419 /*     case TARGET_NR_mq_notify: */
12420 /*         break; */
12421 
12422     case TARGET_NR_mq_getsetattr:
12423         {
12424             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12425             ret = 0;
12426             if (arg2 != 0) {
12427                 if (copy_from_user_mq_attr(&posix_mq_attr_in, arg2) != 0) {
                    return -TARGET_EFAULT;
                }
12428                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12429                                            &posix_mq_attr_out));
12430             } else if (arg3 != 0) {
12431                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12432             }
12433             if (ret == 0 && arg3 != 0) {
12434                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12435             }
12436         }
12437         return ret;
12438 #endif
12439 
12440 #ifdef CONFIG_SPLICE
12441 #ifdef TARGET_NR_tee
12442     case TARGET_NR_tee:
12443         {
12444             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12445         }
12446         return ret;
12447 #endif
12448 #ifdef TARGET_NR_splice
12449     case TARGET_NR_splice:
12450         {
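            /*
             * The optional input/output offsets live in guest memory; read
             * them before the call and write the values updated by the host
             * splice() back afterwards.
             */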
12451             loff_t loff_in, loff_out;
12452             loff_t *ploff_in = NULL, *ploff_out = NULL;
12453             if (arg2) {
12454                 if (get_user_u64(loff_in, arg2)) {
12455                     return -TARGET_EFAULT;
12456                 }
12457                 ploff_in = &loff_in;
12458             }
12459             if (arg4) {
12460                 if (get_user_u64(loff_out, arg4)) {
12461                     return -TARGET_EFAULT;
12462                 }
12463                 ploff_out = &loff_out;
12464             }
12465             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12466             if (arg2) {
12467                 if (put_user_u64(loff_in, arg2)) {
12468                     return -TARGET_EFAULT;
12469                 }
12470             }
12471             if (arg4) {
12472                 if (put_user_u64(loff_out, arg4)) {
12473                     return -TARGET_EFAULT;
12474                 }
12475             }
12476         }
12477         return ret;
12478 #endif
12479 #ifdef TARGET_NR_vmsplice
12480     case TARGET_NR_vmsplice:
12481         {
12482             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12483             if (vec != NULL) {
12484                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12485                 unlock_iovec(vec, arg2, arg3, 0);
12486             } else {
12487                 ret = -host_to_target_errno(errno);
12488             }
12489         }
12490         return ret;
12491 #endif
12492 #endif /* CONFIG_SPLICE */
12493 #ifdef CONFIG_EVENTFD
12494 #if defined(TARGET_NR_eventfd)
12495     case TARGET_NR_eventfd:
12496         ret = get_errno(eventfd(arg1, 0));
12497         if (ret >= 0) {
12498             fd_trans_register(ret, &target_eventfd_trans);
12499         }
12500         return ret;
12501 #endif
12502 #if defined(TARGET_NR_eventfd2)
12503     case TARGET_NR_eventfd2:
12504     {
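        /*
         * Only the NONBLOCK and CLOEXEC bits need translating from the
         * target's flag encoding to the host's; any other bits are passed
         * through unchanged.
         */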
12505         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12506         if (arg2 & TARGET_O_NONBLOCK) {
12507             host_flags |= O_NONBLOCK;
12508         }
12509         if (arg2 & TARGET_O_CLOEXEC) {
12510             host_flags |= O_CLOEXEC;
12511         }
12512         ret = get_errno(eventfd(arg1, host_flags));
12513         if (ret >= 0) {
12514             fd_trans_register(ret, &target_eventfd_trans);
12515         }
12516         return ret;
12517     }
12518 #endif
12519 #endif /* CONFIG_EVENTFD  */
12520 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12521     case TARGET_NR_fallocate:
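        /*
         * On 32-bit ABIs the 64-bit offset and length are each split across
         * two registers and are reassembled with target_offset64().
         */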
12522 #if TARGET_ABI_BITS == 32
12523         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12524                                   target_offset64(arg5, arg6)));
12525 #else
12526         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12527 #endif
12528         return ret;
12529 #endif
12530 #if defined(CONFIG_SYNC_FILE_RANGE)
12531 #if defined(TARGET_NR_sync_file_range)
12532     case TARGET_NR_sync_file_range:
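        /*
         * On 32-bit ABIs the two 64-bit arguments arrive as register pairs;
         * MIPS inserts a padding argument in arg2, so its pairs start at
         * arg3 and the flags end up in arg7.
         */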
12533 #if TARGET_ABI_BITS == 32
12534 #if defined(TARGET_MIPS)
12535         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12536                                         target_offset64(arg5, arg6), arg7));
12537 #else
12538         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12539                                         target_offset64(arg4, arg5), arg6));
12540 #endif /* !TARGET_MIPS */
12541 #else
12542         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12543 #endif
12544         return ret;
12545 #endif
12546 #if defined(TARGET_NR_sync_file_range2) || \
12547     defined(TARGET_NR_arm_sync_file_range)
12548 #if defined(TARGET_NR_sync_file_range2)
12549     case TARGET_NR_sync_file_range2:
12550 #endif
12551 #if defined(TARGET_NR_arm_sync_file_range)
12552     case TARGET_NR_arm_sync_file_range:
12553 #endif
12554         /* This is like sync_file_range but the arguments are reordered */
12555 #if TARGET_ABI_BITS == 32
12556         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12557                                         target_offset64(arg5, arg6), arg2));
12558 #else
12559         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12560 #endif
12561         return ret;
12562 #endif
12563 #endif
12564 #if defined(TARGET_NR_signalfd4)
12565     case TARGET_NR_signalfd4:
12566         return do_signalfd4(arg1, arg2, arg4);
12567 #endif
12568 #if defined(TARGET_NR_signalfd)
12569     case TARGET_NR_signalfd:
12570         return do_signalfd4(arg1, arg2, 0);
12571 #endif
12572 #if defined(CONFIG_EPOLL)
12573 #if defined(TARGET_NR_epoll_create)
12574     case TARGET_NR_epoll_create:
12575         return get_errno(epoll_create(arg1));
12576 #endif
12577 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12578     case TARGET_NR_epoll_create1:
12579         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12580 #endif
12581 #if defined(TARGET_NR_epoll_ctl)
12582     case TARGET_NR_epoll_ctl:
12583     {
12584         struct epoll_event ep;
12585         struct epoll_event *epp = 0;
12586         if (arg4) {
12587             if (arg2 != EPOLL_CTL_DEL) {
12588                 struct target_epoll_event *target_ep;
12589                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12590                     return -TARGET_EFAULT;
12591                 }
12592                 ep.events = tswap32(target_ep->events);
12593                 /*
12594                  * The epoll_data_t union is just opaque data to the kernel,
12595                  * so we transfer all 64 bits across and need not worry what
12596                  * actual data type it is.
12597                  */
12598                 ep.data.u64 = tswap64(target_ep->data.u64);
12599                 unlock_user_struct(target_ep, arg4, 0);
12600             }
12601             /*
12602              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12603              * non-null pointer, even though the argument is ignored.
12605              */
12606             epp = &ep;
12607         }
12608         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12609     }
12610 #endif
12611 
12612 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12613 #if defined(TARGET_NR_epoll_wait)
12614     case TARGET_NR_epoll_wait:
12615 #endif
12616 #if defined(TARGET_NR_epoll_pwait)
12617     case TARGET_NR_epoll_pwait:
12618 #endif
12619     {
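        /*
         * epoll_wait and epoll_pwait share one implementation: bound
         * maxevents, map the guest event array for writing, issue
         * safe_epoll_pwait() (with a converted signal mask for epoll_pwait),
         * then byte-swap the returned events back into guest memory.
         */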
12620         struct target_epoll_event *target_ep;
12621         struct epoll_event *ep;
12622         int epfd = arg1;
12623         int maxevents = arg3;
12624         int timeout = arg4;
12625 
12626         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12627             return -TARGET_EINVAL;
12628         }
12629 
12630         target_ep = lock_user(VERIFY_WRITE, arg2,
12631                               maxevents * sizeof(struct target_epoll_event), 1);
12632         if (!target_ep) {
12633             return -TARGET_EFAULT;
12634         }
12635 
12636         ep = g_try_new(struct epoll_event, maxevents);
12637         if (!ep) {
12638             unlock_user(target_ep, arg2, 0);
12639             return -TARGET_ENOMEM;
12640         }
12641 
12642         switch (num) {
12643 #if defined(TARGET_NR_epoll_pwait)
12644         case TARGET_NR_epoll_pwait:
12645         {
12646             target_sigset_t *target_set;
12647             sigset_t _set, *set = &_set;
12648 
12649             if (arg5) {
12650                 if (arg6 != sizeof(target_sigset_t)) {
12651                     ret = -TARGET_EINVAL;
12652                     break;
12653                 }
12654 
12655                 target_set = lock_user(VERIFY_READ, arg5,
12656                                        sizeof(target_sigset_t), 1);
12657                 if (!target_set) {
12658                     ret = -TARGET_EFAULT;
12659                     break;
12660                 }
12661                 target_to_host_sigset(set, target_set);
12662                 unlock_user(target_set, arg5, 0);
12663             } else {
12664                 set = NULL;
12665             }
12666 
12667             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12668                                              set, SIGSET_T_SIZE));
12669             break;
12670         }
12671 #endif
12672 #if defined(TARGET_NR_epoll_wait)
12673         case TARGET_NR_epoll_wait:
12674             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12675                                              NULL, 0));
12676             break;
12677 #endif
12678         default:
12679             ret = -TARGET_ENOSYS;
12680         }
12681         if (!is_error(ret)) {
12682             int i;
12683             for (i = 0; i < ret; i++) {
12684                 target_ep[i].events = tswap32(ep[i].events);
12685                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12686             }
12687             unlock_user(target_ep, arg2,
12688                         ret * sizeof(struct target_epoll_event));
12689         } else {
12690             unlock_user(target_ep, arg2, 0);
12691         }
12692         g_free(ep);
12693         return ret;
12694     }
12695 #endif
12696 #endif
12697 #ifdef TARGET_NR_prlimit64
12698     case TARGET_NR_prlimit64:
12699     {
12700         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12701         struct target_rlimit64 *target_rnew, *target_rold;
12702         struct host_rlimit64 rnew, rold, *rnewp = 0;
12703         int resource = target_to_host_resource(arg2);
12704 
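        /*
         * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
         * deliberately not forwarded to the host; for those resources only
         * the old limits are read back.
         */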
12705         if (arg3 && (resource != RLIMIT_AS &&
12706                      resource != RLIMIT_DATA &&
12707                      resource != RLIMIT_STACK)) {
12708             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12709                 return -TARGET_EFAULT;
12710             }
12711             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12712             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12713             unlock_user_struct(target_rnew, arg3, 0);
12714             rnewp = &rnew;
12715         }
12716 
12717         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12718         if (!is_error(ret) && arg4) {
12719             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12720                 return -TARGET_EFAULT;
12721             }
12722             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12723             target_rold->rlim_max = tswap64(rold.rlim_max);
12724             unlock_user_struct(target_rold, arg4, 1);
12725         }
12726         return ret;
12727     }
12728 #endif
12729 #ifdef TARGET_NR_gethostname
12730     case TARGET_NR_gethostname:
12731     {
12732         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12733         if (name) {
12734             ret = get_errno(gethostname(name, arg2));
12735             unlock_user(name, arg1, arg2);
12736         } else {
12737             ret = -TARGET_EFAULT;
12738         }
12739         return ret;
12740     }
12741 #endif
12742 #ifdef TARGET_NR_atomic_cmpxchg_32
12743     case TARGET_NR_atomic_cmpxchg_32:
12744     {
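        /*
         * m68k kernel helper: compare the word at guest address arg6 with
         * arg2 and, if they match, store arg1 there; the old memory value
         * is returned so the guest can tell whether the exchange happened.
         */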
12745         /* should use start_exclusive from main.c */
12746         abi_ulong mem_value;
12747         if (get_user_u32(mem_value, arg6)) {
12748             target_siginfo_t info;
12749             info.si_signo = SIGSEGV;
12750             info.si_errno = 0;
12751             info.si_code = TARGET_SEGV_MAPERR;
12752             info._sifields._sigfault._addr = arg6;
12753             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12754                          QEMU_SI_FAULT, &info);
12755             ret = 0xdeadbeef;
            return ret;
12757         }
12758         if (mem_value == arg2) {
12759             put_user_u32(arg1, arg6);
        }
12760         return mem_value;
12761     }
12762 #endif
12763 #ifdef TARGET_NR_atomic_barrier
12764     case TARGET_NR_atomic_barrier:
12765         /* Like the kernel implementation and the QEMU Arm barrier,
12766            treat this as a no-op. */
12767         return 0;
12768 #endif
12769 
12770 #ifdef TARGET_NR_timer_create
12771     case TARGET_NR_timer_create:
12772     {
12773         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12774 
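        /*
         * The guest-visible timer id encodes an index into g_posix_timers
         * together with TIMER_MAGIC; get_timer_id() validates and strips
         * that encoding again in the other timer_* syscalls below.
         */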
12775         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12776 
12777         int clkid = arg1;
12778         int timer_index = next_free_host_timer();
12779 
12780         if (timer_index < 0) {
12781             ret = -TARGET_EAGAIN;
12782         } else {
12783             timer_t *phtimer = g_posix_timers + timer_index;
12784 
12785             if (arg2) {
12786                 phost_sevp = &host_sevp;
12787                 ret = target_to_host_sigevent(phost_sevp, arg2);
12788                 if (ret != 0) {
12789                     return ret;
12790                 }
12791             }
12792 
12793             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12794             if (ret) {
12795                 phtimer = NULL;
12796             } else {
12797                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12798                     return -TARGET_EFAULT;
12799                 }
12800             }
12801         }
12802         return ret;
12803     }
12804 #endif
12805 
12806 #ifdef TARGET_NR_timer_settime
12807     case TARGET_NR_timer_settime:
12808     {
12809         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12810          * struct itimerspec * old_value */
12811         target_timer_t timerid = get_timer_id(arg1);
12812 
12813         if (timerid < 0) {
12814             ret = timerid;
12815         } else if (arg3 == 0) {
12816             ret = -TARGET_EINVAL;
12817         } else {
12818             timer_t htimer = g_posix_timers[timerid];
12819             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12820 
12821             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12822                 return -TARGET_EFAULT;
12823             }
12824             ret = get_errno(
12825                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12826             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12827                 return -TARGET_EFAULT;
12828             }
12829         }
12830         return ret;
12831     }
12832 #endif
12833 
12834 #ifdef TARGET_NR_timer_settime64
12835     case TARGET_NR_timer_settime64:
12836     {
12837         target_timer_t timerid = get_timer_id(arg1);
12838 
12839         if (timerid < 0) {
12840             ret = timerid;
12841         } else if (arg3 == 0) {
12842             ret = -TARGET_EINVAL;
12843         } else {
12844             timer_t htimer = g_posix_timers[timerid];
12845             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12846 
12847             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12848                 return -TARGET_EFAULT;
12849             }
12850             ret = get_errno(
12851                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12852             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12853                 return -TARGET_EFAULT;
12854             }
12855         }
12856         return ret;
12857     }
12858 #endif
12859 
12860 #ifdef TARGET_NR_timer_gettime
12861     case TARGET_NR_timer_gettime:
12862     {
12863         /* args: timer_t timerid, struct itimerspec *curr_value */
12864         target_timer_t timerid = get_timer_id(arg1);
12865 
12866         if (timerid < 0) {
12867             ret = timerid;
12868         } else if (!arg2) {
12869             ret = -TARGET_EFAULT;
12870         } else {
12871             timer_t htimer = g_posix_timers[timerid];
12872             struct itimerspec hspec;
12873             ret = get_errno(timer_gettime(htimer, &hspec));
12874 
12875             if (host_to_target_itimerspec(arg2, &hspec)) {
12876                 ret = -TARGET_EFAULT;
12877             }
12878         }
12879         return ret;
12880     }
12881 #endif
12882 
12883 #ifdef TARGET_NR_timer_gettime64
12884     case TARGET_NR_timer_gettime64:
12885     {
12886         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12887         target_timer_t timerid = get_timer_id(arg1);
12888 
12889         if (timerid < 0) {
12890             ret = timerid;
12891         } else if (!arg2) {
12892             ret = -TARGET_EFAULT;
12893         } else {
12894             timer_t htimer = g_posix_timers[timerid];
12895             struct itimerspec hspec;
12896             ret = get_errno(timer_gettime(htimer, &hspec));
12897 
12898             if (host_to_target_itimerspec64(arg2, &hspec)) {
12899                 ret = -TARGET_EFAULT;
12900             }
12901         }
12902         return ret;
12903     }
12904 #endif
12905 
12906 #ifdef TARGET_NR_timer_getoverrun
12907     case TARGET_NR_timer_getoverrun:
12908     {
12909         /* args: timer_t timerid */
12910         target_timer_t timerid = get_timer_id(arg1);
12911 
12912         if (timerid < 0) {
12913             ret = timerid;
12914         } else {
12915             timer_t htimer = g_posix_timers[timerid];
12916             ret = get_errno(timer_getoverrun(htimer));
12917         }
12918         return ret;
12919     }
12920 #endif
12921 
12922 #ifdef TARGET_NR_timer_delete
12923     case TARGET_NR_timer_delete:
12924     {
12925         /* args: timer_t timerid */
12926         target_timer_t timerid = get_timer_id(arg1);
12927 
12928         if (timerid < 0) {
12929             ret = timerid;
12930         } else {
12931             timer_t htimer = g_posix_timers[timerid];
12932             ret = get_errno(timer_delete(htimer));
12933             g_posix_timers[timerid] = 0;
12934         }
12935         return ret;
12936     }
12937 #endif
12938 
12939 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12940     case TARGET_NR_timerfd_create:
12941         return get_errno(timerfd_create(arg1,
12942                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12943 #endif
12944 
12945 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12946     case TARGET_NR_timerfd_gettime:
12947         {
12948             struct itimerspec its_curr;
12949 
12950             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12951 
12952             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12953                 return -TARGET_EFAULT;
12954             }
12955         }
12956         return ret;
12957 #endif
12958 
12959 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
12960     case TARGET_NR_timerfd_gettime64:
12961         {
12962             struct itimerspec its_curr;
12963 
12964             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12965 
12966             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
12967                 return -TARGET_EFAULT;
12968             }
12969         }
12970         return ret;
12971 #endif
12972 
12973 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
12974     case TARGET_NR_timerfd_settime:
12975         {
12976             struct itimerspec its_new, its_old, *p_new;
12977 
12978             if (arg3) {
12979                 if (target_to_host_itimerspec(&its_new, arg3)) {
12980                     return -TARGET_EFAULT;
12981                 }
12982                 p_new = &its_new;
12983             } else {
12984                 p_new = NULL;
12985             }
12986 
12987             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
12988 
12989             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
12990                 return -TARGET_EFAULT;
12991             }
12992         }
12993         return ret;
12994 #endif
12995 
12996 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
12997     case TARGET_NR_timerfd_settime64:
12998         {
12999             struct itimerspec its_new, its_old, *p_new;
13000 
13001             if (arg3) {
13002                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13003                     return -TARGET_EFAULT;
13004                 }
13005                 p_new = &its_new;
13006             } else {
13007                 p_new = NULL;
13008             }
13009 
13010             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13011 
13012             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13013                 return -TARGET_EFAULT;
13014             }
13015         }
13016         return ret;
13017 #endif
13018 
13019 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13020     case TARGET_NR_ioprio_get:
13021         return get_errno(ioprio_get(arg1, arg2));
13022 #endif
13023 
13024 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13025     case TARGET_NR_ioprio_set:
13026         return get_errno(ioprio_set(arg1, arg2, arg3));
13027 #endif
13028 
13029 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13030     case TARGET_NR_setns:
13031         return get_errno(setns(arg1, arg2));
13032 #endif
13033 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13034     case TARGET_NR_unshare:
13035         return get_errno(unshare(arg1));
13036 #endif
13037 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13038     case TARGET_NR_kcmp:
13039         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13040 #endif
13041 #ifdef TARGET_NR_swapcontext
13042     case TARGET_NR_swapcontext:
13043         /* PowerPC specific.  */
13044         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13045 #endif
13046 #ifdef TARGET_NR_memfd_create
13047     case TARGET_NR_memfd_create:
13048         p = lock_user_string(arg1);
13049         if (!p) {
13050             return -TARGET_EFAULT;
13051         }
13052         ret = get_errno(memfd_create(p, arg2));
13053         fd_trans_unregister(ret);
13054         unlock_user(p, arg1, 0);
13055         return ret;
13056 #endif
13057 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13058     case TARGET_NR_membarrier:
13059         return get_errno(membarrier(arg1, arg2));
13060 #endif
13061 
13062 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13063     case TARGET_NR_copy_file_range:
13064         {
13065             loff_t inoff, outoff;
13066             loff_t *pinoff = NULL, *poutoff = NULL;
13067 
13068             if (arg2) {
13069                 if (get_user_u64(inoff, arg2)) {
13070                     return -TARGET_EFAULT;
13071                 }
13072                 pinoff = &inoff;
13073             }
13074             if (arg4) {
13075                 if (get_user_u64(outoff, arg4)) {
13076                     return -TARGET_EFAULT;
13077                 }
13078                 poutoff = &outoff;
13079             }
13080             /* Do not sign-extend the count parameter. */
13081             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13082                                                  (abi_ulong)arg5, arg6));
13083             if (!is_error(ret) && ret > 0) {
13084                 if (arg2) {
13085                     if (put_user_u64(inoff, arg2)) {
13086                         return -TARGET_EFAULT;
13087                     }
13088                 }
13089                 if (arg4) {
13090                     if (put_user_u64(outoff, arg4)) {
13091                         return -TARGET_EFAULT;
13092                     }
13093                 }
13094             }
13095         }
13096         return ret;
13097 #endif
13098 
13099 #if defined(TARGET_NR_pivot_root)
13100     case TARGET_NR_pivot_root:
13101         {
13102             void *p2;
13103             p = lock_user_string(arg1); /* new_root */
13104             p2 = lock_user_string(arg2); /* put_old */
13105             if (!p || !p2) {
13106                 ret = -TARGET_EFAULT;
13107             } else {
13108                 ret = get_errno(pivot_root(p, p2));
13109             }
13110             unlock_user(p2, arg2, 0);
13111             unlock_user(p, arg1, 0);
13112         }
13113         return ret;
13114 #endif
13115 
13116     default:
13117         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13118         return -TARGET_ENOSYS;
13119     }
13120     return ret;
13121 }
13122 
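/*
 * Entry point used by the per-architecture cpu loops: record the syscall
 * for tracing, optionally print strace output, dispatch the real work to
 * do_syscall1() and log the result before returning it to the caller.
 */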
13123 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13124                     abi_long arg2, abi_long arg3, abi_long arg4,
13125                     abi_long arg5, abi_long arg6, abi_long arg7,
13126                     abi_long arg8)
13127 {
13128     CPUState *cpu = env_cpu(cpu_env);
13129     abi_long ret;
13130 
13131 #ifdef DEBUG_ERESTARTSYS
13132     /* Debug-only code for exercising the syscall-restart code paths
13133      * in the per-architecture cpu main loops: restart every syscall
13134      * the guest makes once before letting it through.
13135      */
13136     {
13137         static bool flag;
13138         flag = !flag;
13139         if (flag) {
13140             return -TARGET_ERESTARTSYS;
13141         }
13142     }
13143 #endif
13144 
13145     record_syscall_start(cpu, num, arg1,
13146                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13147 
13148     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13149         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13150     }
13151 
13152     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13153                       arg5, arg6, arg7, arg8);
13154 
13155     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13156         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13157                           arg3, arg4, arg5, arg6);
13158     }
13159 
13160     record_syscall_return(cpu, num, ret);
13161     return ret;
13162 }
13163