xref: /openbmc/qemu/linux-user/syscall.c (revision a44d57a3)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "strace.h"
131 #include "qemu/guest-random.h"
132 #include "qemu/selfmap.h"
133 #include "user/syscall-trace.h"
134 #include "qapi/error.h"
135 #include "fd-trans.h"
136 #include "tcg/tcg.h"
137 
138 #ifndef CLONE_IO
139 #define CLONE_IO                0x80000000      /* Clone io context */
140 #endif
141 
142 /* We can't directly call the host clone syscall, because this will
143  * badly confuse libc (breaking mutexes, for example). So we must
144  * divide clone flags into:
145  *  * flag combinations that look like pthread_create()
146  *  * flag combinations that look like fork()
147  *  * flags we can implement within QEMU itself
148  *  * flags we can't support and will return an error for
149  */
150 /* For thread creation, all these flags must be present; for
151  * fork, none must be present.
152  */
153 #define CLONE_THREAD_FLAGS                              \
154     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
155      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
156 
157 /* These flags are ignored:
158  * CLONE_DETACHED is now ignored by the kernel;
159  * CLONE_IO is just an optimisation hint to the I/O scheduler
160  */
161 #define CLONE_IGNORED_FLAGS                     \
162     (CLONE_DETACHED | CLONE_IO)
163 
164 /* Flags for fork which we can implement within QEMU itself */
165 #define CLONE_OPTIONAL_FORK_FLAGS               \
166     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
167      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
168 
169 /* Flags for thread creation which we can implement within QEMU itself */
170 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
171     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
172      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
173 
174 #define CLONE_INVALID_FORK_FLAGS                                        \
175     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
176 
177 #define CLONE_INVALID_THREAD_FLAGS                                      \
178     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
179        CLONE_IGNORED_FLAGS))
180 
181 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
182  * have almost all been allocated. We cannot support any of
183  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
184  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
185  * The checks against the invalid thread masks above will catch these.
186  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
187  */
188 
189 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
190  * once. This exercises the codepaths for restart.
191  */
192 //#define DEBUG_ERESTARTSYS
193 
194 //#include <linux/msdos_fs.h>
195 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
196 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
197 
198 #undef _syscall0
199 #undef _syscall1
200 #undef _syscall2
201 #undef _syscall3
202 #undef _syscall4
203 #undef _syscall5
204 #undef _syscall6
205 
206 #define _syscall0(type,name)		\
207 static type name (void)			\
208 {					\
209 	return syscall(__NR_##name);	\
210 }
211 
212 #define _syscall1(type,name,type1,arg1)		\
213 static type name (type1 arg1)			\
214 {						\
215 	return syscall(__NR_##name, arg1);	\
216 }
217 
218 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
219 static type name (type1 arg1,type2 arg2)		\
220 {							\
221 	return syscall(__NR_##name, arg1, arg2);	\
222 }
223 
224 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
225 static type name (type1 arg1,type2 arg2,type3 arg3)		\
226 {								\
227 	return syscall(__NR_##name, arg1, arg2, arg3);		\
228 }
229 
230 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
231 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
232 {										\
233 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
234 }
235 
236 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
237 		  type5,arg5)							\
238 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
239 {										\
240 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
241 }
242 
243 
244 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5,type6,arg6)					\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
247                   type6 arg6)							\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
250 }
251 
252 
253 #define __NR_sys_uname __NR_uname
254 #define __NR_sys_getcwd1 __NR_getcwd
255 #define __NR_sys_getdents __NR_getdents
256 #define __NR_sys_getdents64 __NR_getdents64
257 #define __NR_sys_getpriority __NR_getpriority
258 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
259 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
260 #define __NR_sys_syslog __NR_syslog
261 #if defined(__NR_futex)
262 # define __NR_sys_futex __NR_futex
263 #endif
264 #if defined(__NR_futex_time64)
265 # define __NR_sys_futex_time64 __NR_futex_time64
266 #endif
267 #define __NR_sys_inotify_init __NR_inotify_init
268 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
269 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
270 #define __NR_sys_statx __NR_statx
271 
272 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
273 #define __NR__llseek __NR_lseek
274 #endif
275 
276 /* Newer kernel ports have llseek() instead of _llseek() */
277 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
278 #define TARGET_NR__llseek TARGET_NR_llseek
279 #endif
280 
281 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
282 #ifndef TARGET_O_NONBLOCK_MASK
283 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
284 #endif
285 
286 #define __NR_sys_gettid __NR_gettid
287 _syscall0(int, sys_gettid)
288 
289 /* For the 64-bit guest on 32-bit host case we must emulate
290  * getdents using getdents64, because otherwise the host
291  * might hand us back more dirent records than we can fit
292  * into the guest buffer after structure format conversion.
293  * Otherwise we emulate getdents with getdents if the host has it.
294  */
295 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
296 #define EMULATE_GETDENTS_WITH_GETDENTS
297 #endif
298 
299 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
300 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
301 #endif
302 #if (defined(TARGET_NR_getdents) && \
303       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
304     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
305 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
306 #endif
307 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
308 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
309           loff_t *, res, uint, wh);
310 #endif
311 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
312 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
313           siginfo_t *, uinfo)
314 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
315 #ifdef __NR_exit_group
316 _syscall1(int,exit_group,int,error_code)
317 #endif
318 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
319 _syscall1(int,set_tid_address,int *,tidptr)
320 #endif
321 #if defined(__NR_futex)
322 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
323           const struct timespec *,timeout,int *,uaddr2,int,val3)
324 #endif
325 #if defined(__NR_futex_time64)
326 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
327           const struct timespec *,timeout,int *,uaddr2,int,val3)
328 #endif
329 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
330 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
331           unsigned long *, user_mask_ptr);
332 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
333 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
334           unsigned long *, user_mask_ptr);
335 #define __NR_sys_getcpu __NR_getcpu
336 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
337 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
338           void *, arg);
339 _syscall2(int, capget, struct __user_cap_header_struct *, header,
340           struct __user_cap_data_struct *, data);
341 _syscall2(int, capset, struct __user_cap_header_struct *, header,
342           struct __user_cap_data_struct *, data);
343 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
344 _syscall2(int, ioprio_get, int, which, int, who)
345 #endif
346 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
347 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
348 #endif
349 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
350 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
351 #endif
352 
353 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
354 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
355           unsigned long, idx1, unsigned long, idx2)
356 #endif
357 
358 /*
359  * It is assumed that struct statx is architecture independent.
360  */
361 #if defined(TARGET_NR_statx) && defined(__NR_statx)
362 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
363           unsigned int, mask, struct target_statx *, statxbuf)
364 #endif
365 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
366 _syscall2(int, membarrier, int, cmd, int, flags)
367 #endif
368 
/*
 * Translation table between the target's open(2)/fcntl(2) file status
 * flag bits and the host's.  Each row is (target mask, target bits,
 * host mask, host bits); optional flags are compiled in only when the
 * host defines them.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
404 
405 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
406 
407 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
408 #if defined(__NR_utimensat)
409 #define __NR_sys_utimensat __NR_utimensat
410 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
411           const struct timespec *,tsp,int,flags)
412 #else
/*
 * Fallback for hosts whose libc lacks the utimensat syscall wrapper:
 * unconditionally fail with ENOSYS, as the kernel would for an
 * unimplemented syscall.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    /* Arguments are deliberately ignored; the host cannot service this. */
    (void)dirfd;
    (void)pathname;
    (void)times;
    (void)flags;
    errno = ENOSYS;
    return -1;
}
419 #endif
420 #endif /* TARGET_NR_utimensat */
421 
422 #ifdef TARGET_NR_renameat2
423 #if defined(__NR_renameat2)
424 #define __NR_sys_renameat2 __NR_renameat2
425 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
426           const char *, new, unsigned int, flags)
427 #else
/*
 * Fallback for hosts without the renameat2 syscall.  A plain rename
 * (flags == 0) can be forwarded to renameat(); any flag bits cannot
 * be honoured, so report ENOSYS for them.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
437 #endif
438 #endif /* TARGET_NR_renameat2 */
439 
440 #ifdef CONFIG_INOTIFY
441 #include <sys/inotify.h>
442 
443 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher can call inotify_init() uniformly. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
448 #endif
449 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Thin wrapper around inotify_add_watch() for the syscall dispatcher. */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    int wd = inotify_add_watch(fd, pathname, mask);
    return wd;
}
454 #endif
455 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Thin wrapper around inotify_rm_watch() for the syscall dispatcher. */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    int ret = inotify_rm_watch(fd, wd);
    return ret;
}
460 #endif
461 #ifdef CONFIG_INOTIFY1
462 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Thin wrapper around inotify_init1() for the syscall dispatcher. */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
467 #endif
468 #endif
469 #else
470 /* Userspace can usually survive runtime without inotify */
471 #undef TARGET_NR_inotify_init
472 #undef TARGET_NR_inotify_init1
473 #undef TARGET_NR_inotify_add_watch
474 #undef TARGET_NR_inotify_rm_watch
475 #endif /* CONFIG_INOTIFY  */
476 
477 #if defined(TARGET_NR_prlimit64)
478 #ifndef __NR_prlimit64
479 # define __NR_prlimit64 -1
480 #endif
481 #define __NR_sys_prlimit64 __NR_prlimit64
482 /* The glibc rlimit structure may not be that used by the underlying syscall */
483 struct host_rlimit64 {
484     uint64_t rlim_cur;
485     uint64_t rlim_max;
486 };
487 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
488           const struct host_rlimit64 *, new_limit,
489           struct host_rlimit64 *, old_limit)
490 #endif
491 
492 
493 #if defined(TARGET_NR_timer_create)
494 /* Maximum of 32 active POSIX timers allowed at any one time. */
495 static timer_t g_posix_timers[32] = { 0, } ;
496 
497 static inline int next_free_host_timer(void)
498 {
499     int k ;
500     /* FIXME: Does finding the next free slot require a lock? */
501     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
502         if (g_posix_timers[k] == 0) {
503             g_posix_timers[k] = (timer_t) 1;
504             return k;
505         }
506     }
507     return -1;
508 }
509 #endif
510 
/*
 * Map a host errno value to the target's numbering.  The case list is
 * generated from errnos.c.inc; any host value without an explicit
 * entry is assumed to be numerically identical on the target.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
521 
/*
 * Map a target errno value back to the host's numbering; the inverse
 * of host_to_target_errno(), generated from the same errnos.c.inc
 * list.  Unknown values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
532 
533 static inline abi_long get_errno(abi_long ret)
534 {
535     if (ret == -1)
536         return -host_to_target_errno(errno);
537     else
538         return ret;
539 }
540 
/*
 * Return a human-readable description for a target errno value.
 * The two QEMU-internal pseudo-errnos are handled specially because
 * the host strerror() knows nothing about them; everything else is
 * translated to the host numbering and described by strerror().
 */
const char *target_strerror(int err)
{
    if (err == TARGET_ERESTARTSYS) {
        return "To be restarted";
    }
    if (err == TARGET_QEMU_ESIGRETURN) {
        return "Successful exit from sigreturn";
    }

    return strerror(target_to_host_errno(err));
}
552 
553 #define safe_syscall0(type, name) \
554 static type safe_##name(void) \
555 { \
556     return safe_syscall(__NR_##name); \
557 }
558 
559 #define safe_syscall1(type, name, type1, arg1) \
560 static type safe_##name(type1 arg1) \
561 { \
562     return safe_syscall(__NR_##name, arg1); \
563 }
564 
565 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
566 static type safe_##name(type1 arg1, type2 arg2) \
567 { \
568     return safe_syscall(__NR_##name, arg1, arg2); \
569 }
570 
571 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
572 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
573 { \
574     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
575 }
576 
577 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
578     type4, arg4) \
579 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
580 { \
581     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
582 }
583 
584 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
585     type4, arg4, type5, arg5) \
586 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
587     type5 arg5) \
588 { \
589     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
590 }
591 
592 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
593     type4, arg4, type5, arg5, type6, arg6) \
594 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
595     type5 arg5, type6 arg6) \
596 { \
597     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
598 }
599 
600 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
601 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
602 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
603               int, flags, mode_t, mode)
604 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
605 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
606               struct rusage *, rusage)
607 #endif
608 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
609               int, options, struct rusage *, rusage)
610 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
611 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
612     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
613 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
614               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
615 #endif
616 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
617 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
618               struct timespec *, tsp, const sigset_t *, sigmask,
619               size_t, sigsetsize)
620 #endif
621 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
622               int, maxevents, int, timeout, const sigset_t *, sigmask,
623               size_t, sigsetsize)
624 #if defined(__NR_futex)
625 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
626               const struct timespec *,timeout,int *,uaddr2,int,val3)
627 #endif
628 #if defined(__NR_futex_time64)
629 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
630               const struct timespec *,timeout,int *,uaddr2,int,val3)
631 #endif
632 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
633 safe_syscall2(int, kill, pid_t, pid, int, sig)
634 safe_syscall2(int, tkill, int, tid, int, sig)
635 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
636 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
637 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
638 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
639               unsigned long, pos_l, unsigned long, pos_h)
640 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
641               unsigned long, pos_l, unsigned long, pos_h)
642 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
643               socklen_t, addrlen)
644 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
645               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
646 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
647               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
648 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
649 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
650 safe_syscall2(int, flock, int, fd, int, operation)
651 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
652 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
653               const struct timespec *, uts, size_t, sigsetsize)
654 #endif
655 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
656               int, flags)
657 #if defined(TARGET_NR_nanosleep)
658 safe_syscall2(int, nanosleep, const struct timespec *, req,
659               struct timespec *, rem)
660 #endif
661 #if defined(TARGET_NR_clock_nanosleep) || \
662     defined(TARGET_NR_clock_nanosleep_time64)
663 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
664               const struct timespec *, req, struct timespec *, rem)
665 #endif
666 #ifdef __NR_ipc
667 #ifdef __s390x__
668 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
669               void *, ptr)
670 #else
671 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
672               void *, ptr, long, fifth)
673 #endif
674 #endif
675 #ifdef __NR_msgsnd
676 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
677               int, flags)
678 #endif
679 #ifdef __NR_msgrcv
680 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
681               long, msgtype, int, flags)
682 #endif
683 #ifdef __NR_semtimedop
684 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
685               unsigned, nsops, const struct timespec *, timeout)
686 #endif
687 #if defined(TARGET_NR_mq_timedsend) || \
688     defined(TARGET_NR_mq_timedsend_time64)
689 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
690               size_t, len, unsigned, prio, const struct timespec *, timeout)
691 #endif
692 #if defined(TARGET_NR_mq_timedreceive) || \
693     defined(TARGET_NR_mq_timedreceive_time64)
694 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
695               size_t, len, unsigned *, prio, const struct timespec *, timeout)
696 #endif
697 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
698 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
699               int, outfd, loff_t *, poutoff, size_t, length,
700               unsigned int, flags)
701 #endif
702 
703 /* We do ioctl like this rather than via safe_syscall3 to preserve the
704  * "third argument might be integer or pointer or not present" behaviour of
705  * the libc function.
706  */
707 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
708 /* Similarly for fcntl. Note that callers must always:
709  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
710  *  use the flock64 struct rather than unsuffixed flock
711  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
712  */
713 #ifdef __NR_fcntl64
714 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
715 #else
716 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
717 #endif
718 
719 static inline int host_to_target_sock_type(int host_type)
720 {
721     int target_type;
722 
723     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
724     case SOCK_DGRAM:
725         target_type = TARGET_SOCK_DGRAM;
726         break;
727     case SOCK_STREAM:
728         target_type = TARGET_SOCK_STREAM;
729         break;
730     default:
731         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
732         break;
733     }
734 
735 #if defined(SOCK_CLOEXEC)
736     if (host_type & SOCK_CLOEXEC) {
737         target_type |= TARGET_SOCK_CLOEXEC;
738     }
739 #endif
740 
741 #if defined(SOCK_NONBLOCK)
742     if (host_type & SOCK_NONBLOCK) {
743         target_type |= TARGET_SOCK_NONBLOCK;
744     }
745 #endif
746 
747     return target_type;
748 }
749 
750 static abi_ulong target_brk;
751 static abi_ulong target_original_brk;
752 static abi_ulong brk_page;
753 
/*
 * Record the guest's initial program break.  Called once at load time;
 * do_brk() later grows the heap upward from this baseline and never
 * shrinks below target_original_brk.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
759 
760 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
761 #define DEBUGF_BRK(message, args...)
762 
/* do_brk() must return target values and target errnos.
 *
 * Emulates the guest brk(2): adjusts target_brk within the pages
 * already reserved (brk_page), or maps further anonymous memory after
 * them.  On any failure the previous break is returned unchanged
 * (except on Alpha, see below), matching Linux brk semantics.
 */
abi_long do_brk(abi_ulong new_brk)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;

    /* brk pointers are always untagged */

    DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);

    /* brk(0) is a query: report the current break without changing it. */
    if (!new_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
        return target_brk;
    }
    /* Never shrink below the initial break; keep the current value. */
    if (new_brk < target_original_brk) {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
                   target_brk);
        return target_brk;
    }

    /* If the new brk is less than the highest page reserved to the
     * target heap allocation, set it and we're almost done...  */
    if (new_brk <= brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  */
        if (new_brk > target_brk) {
            memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
        }
	target_brk = new_brk;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
	return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
    mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = new_brk;
        brk_page = HOST_PAGE_ALIGN(target_brk);
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
            target_brk);
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
    }
    else {
        DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
842 
843 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
844     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
845 static inline abi_long copy_from_user_fdset(fd_set *fds,
846                                             abi_ulong target_fds_addr,
847                                             int n)
848 {
849     int i, nw, j, k;
850     abi_ulong b, *target_fds;
851 
852     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
853     if (!(target_fds = lock_user(VERIFY_READ,
854                                  target_fds_addr,
855                                  sizeof(abi_ulong) * nw,
856                                  1)))
857         return -TARGET_EFAULT;
858 
859     FD_ZERO(fds);
860     k = 0;
861     for (i = 0; i < nw; i++) {
862         /* grab the abi_ulong */
863         __get_user(b, &target_fds[i]);
864         for (j = 0; j < TARGET_ABI_BITS; j++) {
865             /* check the bit inside the abi_ulong */
866             if ((b >> j) & 1)
867                 FD_SET(k, fds);
868             k++;
869         }
870     }
871 
872     unlock_user(target_fds, target_fds_addr, 0);
873 
874     return 0;
875 }
876 
877 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
878                                                  abi_ulong target_fds_addr,
879                                                  int n)
880 {
881     if (target_fds_addr) {
882         if (copy_from_user_fdset(fds, target_fds_addr, n))
883             return -TARGET_EFAULT;
884         *fds_ptr = fds;
885     } else {
886         *fds_ptr = NULL;
887     }
888     return 0;
889 }
890 
891 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
892                                           const fd_set *fds,
893                                           int n)
894 {
895     int i, nw, j, k;
896     abi_long v;
897     abi_ulong *target_fds;
898 
899     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
900     if (!(target_fds = lock_user(VERIFY_WRITE,
901                                  target_fds_addr,
902                                  sizeof(abi_ulong) * nw,
903                                  0)))
904         return -TARGET_EFAULT;
905 
906     k = 0;
907     for (i = 0; i < nw; i++) {
908         v = 0;
909         for (j = 0; j < TARGET_ABI_BITS; j++) {
910             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
911             k++;
912         }
913         __put_user(v, &target_fds[i]);
914     }
915 
916     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
917 
918     return 0;
919 }
920 #endif
921 
922 #if defined(__alpha__)
923 #define HOST_HZ 1024
924 #else
925 #define HOST_HZ 100
926 #endif
927 
928 static inline abi_long host_to_target_clock_t(long ticks)
929 {
930 #if HOST_HZ == TARGET_HZ
931     return ticks;
932 #else
933     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
934 #endif
935 }
936 
937 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
938                                              const struct rusage *rusage)
939 {
940     struct target_rusage *target_rusage;
941 
942     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
943         return -TARGET_EFAULT;
944     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
945     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
946     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
947     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
948     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
949     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
950     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
951     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
952     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
953     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
954     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
955     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
956     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
957     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
958     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
959     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
960     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
961     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
962     unlock_user_struct(target_rusage, target_addr, 1);
963 
964     return 0;
965 }
966 
967 #ifdef TARGET_NR_setrlimit
968 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
969 {
970     abi_ulong target_rlim_swap;
971     rlim_t result;
972 
973     target_rlim_swap = tswapal(target_rlim);
974     if (target_rlim_swap == TARGET_RLIM_INFINITY)
975         return RLIM_INFINITY;
976 
977     result = target_rlim_swap;
978     if (target_rlim_swap != (rlim_t)result)
979         return RLIM_INFINITY;
980 
981     return result;
982 }
983 #endif
984 
985 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
986 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
987 {
988     abi_ulong target_rlim_swap;
989     abi_ulong result;
990 
991     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
992         target_rlim_swap = TARGET_RLIM_INFINITY;
993     else
994         target_rlim_swap = rlim;
995     result = tswapal(target_rlim_swap);
996 
997     return result;
998 }
999 #endif
1000 
1001 static inline int target_to_host_resource(int code)
1002 {
1003     switch (code) {
1004     case TARGET_RLIMIT_AS:
1005         return RLIMIT_AS;
1006     case TARGET_RLIMIT_CORE:
1007         return RLIMIT_CORE;
1008     case TARGET_RLIMIT_CPU:
1009         return RLIMIT_CPU;
1010     case TARGET_RLIMIT_DATA:
1011         return RLIMIT_DATA;
1012     case TARGET_RLIMIT_FSIZE:
1013         return RLIMIT_FSIZE;
1014     case TARGET_RLIMIT_LOCKS:
1015         return RLIMIT_LOCKS;
1016     case TARGET_RLIMIT_MEMLOCK:
1017         return RLIMIT_MEMLOCK;
1018     case TARGET_RLIMIT_MSGQUEUE:
1019         return RLIMIT_MSGQUEUE;
1020     case TARGET_RLIMIT_NICE:
1021         return RLIMIT_NICE;
1022     case TARGET_RLIMIT_NOFILE:
1023         return RLIMIT_NOFILE;
1024     case TARGET_RLIMIT_NPROC:
1025         return RLIMIT_NPROC;
1026     case TARGET_RLIMIT_RSS:
1027         return RLIMIT_RSS;
1028     case TARGET_RLIMIT_RTPRIO:
1029         return RLIMIT_RTPRIO;
1030     case TARGET_RLIMIT_SIGPENDING:
1031         return RLIMIT_SIGPENDING;
1032     case TARGET_RLIMIT_STACK:
1033         return RLIMIT_STACK;
1034     default:
1035         return code;
1036     }
1037 }
1038 
1039 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1040                                               abi_ulong target_tv_addr)
1041 {
1042     struct target_timeval *target_tv;
1043 
1044     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1045         return -TARGET_EFAULT;
1046     }
1047 
1048     __get_user(tv->tv_sec, &target_tv->tv_sec);
1049     __get_user(tv->tv_usec, &target_tv->tv_usec);
1050 
1051     unlock_user_struct(target_tv, target_tv_addr, 0);
1052 
1053     return 0;
1054 }
1055 
1056 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1057                                             const struct timeval *tv)
1058 {
1059     struct target_timeval *target_tv;
1060 
1061     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1062         return -TARGET_EFAULT;
1063     }
1064 
1065     __put_user(tv->tv_sec, &target_tv->tv_sec);
1066     __put_user(tv->tv_usec, &target_tv->tv_usec);
1067 
1068     unlock_user_struct(target_tv, target_tv_addr, 1);
1069 
1070     return 0;
1071 }
1072 
1073 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1074 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1075                                                 abi_ulong target_tv_addr)
1076 {
1077     struct target__kernel_sock_timeval *target_tv;
1078 
1079     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1080         return -TARGET_EFAULT;
1081     }
1082 
1083     __get_user(tv->tv_sec, &target_tv->tv_sec);
1084     __get_user(tv->tv_usec, &target_tv->tv_usec);
1085 
1086     unlock_user_struct(target_tv, target_tv_addr, 0);
1087 
1088     return 0;
1089 }
1090 #endif
1091 
1092 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1093                                               const struct timeval *tv)
1094 {
1095     struct target__kernel_sock_timeval *target_tv;
1096 
1097     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1098         return -TARGET_EFAULT;
1099     }
1100 
1101     __put_user(tv->tv_sec, &target_tv->tv_sec);
1102     __put_user(tv->tv_usec, &target_tv->tv_usec);
1103 
1104     unlock_user_struct(target_tv, target_tv_addr, 1);
1105 
1106     return 0;
1107 }
1108 
1109 #if defined(TARGET_NR_futex) || \
1110     defined(TARGET_NR_rt_sigtimedwait) || \
1111     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1112     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1113     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1114     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1115     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1116     defined(TARGET_NR_timer_settime) || \
1117     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1118 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1119                                                abi_ulong target_addr)
1120 {
1121     struct target_timespec *target_ts;
1122 
1123     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1124         return -TARGET_EFAULT;
1125     }
1126     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1127     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1128     unlock_user_struct(target_ts, target_addr, 0);
1129     return 0;
1130 }
1131 #endif
1132 
1133 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1134     defined(TARGET_NR_timer_settime64) || \
1135     defined(TARGET_NR_mq_timedsend_time64) || \
1136     defined(TARGET_NR_mq_timedreceive_time64) || \
1137     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1138     defined(TARGET_NR_clock_nanosleep_time64) || \
1139     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1140     defined(TARGET_NR_utimensat) || \
1141     defined(TARGET_NR_utimensat_time64) || \
1142     defined(TARGET_NR_semtimedop_time64) || \
1143     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1144 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1145                                                  abi_ulong target_addr)
1146 {
1147     struct target__kernel_timespec *target_ts;
1148 
1149     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1150         return -TARGET_EFAULT;
1151     }
1152     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1153     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1154     /* in 32bit mode, this drops the padding */
1155     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1156     unlock_user_struct(target_ts, target_addr, 0);
1157     return 0;
1158 }
1159 #endif
1160 
1161 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1162                                                struct timespec *host_ts)
1163 {
1164     struct target_timespec *target_ts;
1165 
1166     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1167         return -TARGET_EFAULT;
1168     }
1169     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1170     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1171     unlock_user_struct(target_ts, target_addr, 1);
1172     return 0;
1173 }
1174 
1175 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1176                                                  struct timespec *host_ts)
1177 {
1178     struct target__kernel_timespec *target_ts;
1179 
1180     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1181         return -TARGET_EFAULT;
1182     }
1183     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1184     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1185     unlock_user_struct(target_ts, target_addr, 1);
1186     return 0;
1187 }
1188 
1189 #if defined(TARGET_NR_gettimeofday)
1190 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1191                                              struct timezone *tz)
1192 {
1193     struct target_timezone *target_tz;
1194 
1195     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1196         return -TARGET_EFAULT;
1197     }
1198 
1199     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1200     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1201 
1202     unlock_user_struct(target_tz, target_tz_addr, 1);
1203 
1204     return 0;
1205 }
1206 #endif
1207 
1208 #if defined(TARGET_NR_settimeofday)
1209 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1210                                                abi_ulong target_tz_addr)
1211 {
1212     struct target_timezone *target_tz;
1213 
1214     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1215         return -TARGET_EFAULT;
1216     }
1217 
1218     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1219     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1220 
1221     unlock_user_struct(target_tz, target_tz_addr, 0);
1222 
1223     return 0;
1224 }
1225 #endif
1226 
1227 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1228 #include <mqueue.h>
1229 
1230 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1231                                               abi_ulong target_mq_attr_addr)
1232 {
1233     struct target_mq_attr *target_mq_attr;
1234 
1235     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1236                           target_mq_attr_addr, 1))
1237         return -TARGET_EFAULT;
1238 
1239     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1240     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1241     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1242     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1243 
1244     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1245 
1246     return 0;
1247 }
1248 
1249 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1250                                             const struct mq_attr *attr)
1251 {
1252     struct target_mq_attr *target_mq_attr;
1253 
1254     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1255                           target_mq_attr_addr, 0))
1256         return -TARGET_EFAULT;
1257 
1258     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1259     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1260     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1261     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1262 
1263     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1264 
1265     return 0;
1266 }
1267 #endif
1268 
1269 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1270 /* do_select() must return target values and target errnos. */
/*
 * Emulate select(2): unpack the three optional guest fd_sets and the
 * optional timeout, run the host pselect6, and copy the (possibly
 * modified) sets and remaining timeout back to the guest.
 * Returns the syscall result, or -TARGET_EFAULT on a bad guest buffer.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Each helper leaves the *_ptr NULL when the guest passed no set. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval; the host call wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* NULL sigmask: plain select semantics via the pselect6 syscall. */
    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Write back only the sets the guest actually supplied. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the time not slept. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1326 
1327 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1328 static abi_long do_old_select(abi_ulong arg1)
1329 {
1330     struct target_sel_arg_struct *sel;
1331     abi_ulong inp, outp, exp, tvp;
1332     long nsel;
1333 
1334     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1335         return -TARGET_EFAULT;
1336     }
1337 
1338     nsel = tswapal(sel->n);
1339     inp = tswapal(sel->inp);
1340     outp = tswapal(sel->outp);
1341     exp = tswapal(sel->exp);
1342     tvp = tswapal(sel->tvp);
1343 
1344     unlock_user_struct(sel, arg1, 0);
1345 
1346     return do_select(nsel, inp, outp, exp, tvp);
1347 }
1348 #endif
1349 #endif
1350 
1351 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6(2) / pselect6_time64(2).  'time64' selects whether
 * the timeout argument uses the 64-bit kernel_timespec layout.
 * Returns the syscall result, -TARGET_EFAULT on a bad guest buffer,
 * or -TARGET_EINVAL for a wrongly-sized sigset.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    sigset_t set;
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;
    target_sigset_t *target_sigset;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Each helper leaves the *_ptr NULL when the guest passed no set. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    if (arg6) {
        sig_ptr = &sig;
        sig.size = SIGSET_T_SIZE;

        /* arg6 points at { sigset pointer, sigset size } in guest memory. */
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            sig.set = &set;
            if (arg_sigsize != sizeof(*target_sigset)) {
                /* Like the kernel, we enforce correct size sigsets */
                return -TARGET_EINVAL;
            }
            target_sigset = lock_user(VERIFY_READ, arg_sigset,
                                      sizeof(*target_sigset), 1);
            if (!target_sigset) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, target_sigset);
            unlock_user(target_sigset, arg_sigset, 0);
        } else {
            sig.set = NULL;
        }
    } else {
        sig_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (!is_error(ret)) {
        /* Write back only the sets the guest actually supplied. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        /* The kernel updates the timeout with the time not slept. */
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1471 #endif
1472 
1473 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1474     defined(TARGET_NR_ppoll_time64)
1475 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1476                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1477 {
1478     struct target_pollfd *target_pfd;
1479     unsigned int nfds = arg2;
1480     struct pollfd *pfd;
1481     unsigned int i;
1482     abi_long ret;
1483 
1484     pfd = NULL;
1485     target_pfd = NULL;
1486     if (nfds) {
1487         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1488             return -TARGET_EINVAL;
1489         }
1490         target_pfd = lock_user(VERIFY_WRITE, arg1,
1491                                sizeof(struct target_pollfd) * nfds, 1);
1492         if (!target_pfd) {
1493             return -TARGET_EFAULT;
1494         }
1495 
1496         pfd = alloca(sizeof(struct pollfd) * nfds);
1497         for (i = 0; i < nfds; i++) {
1498             pfd[i].fd = tswap32(target_pfd[i].fd);
1499             pfd[i].events = tswap16(target_pfd[i].events);
1500         }
1501     }
1502     if (ppoll) {
1503         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1504         target_sigset_t *target_set;
1505         sigset_t _set, *set = &_set;
1506 
1507         if (arg3) {
1508             if (time64) {
1509                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1510                     unlock_user(target_pfd, arg1, 0);
1511                     return -TARGET_EFAULT;
1512                 }
1513             } else {
1514                 if (target_to_host_timespec(timeout_ts, arg3)) {
1515                     unlock_user(target_pfd, arg1, 0);
1516                     return -TARGET_EFAULT;
1517                 }
1518             }
1519         } else {
1520             timeout_ts = NULL;
1521         }
1522 
1523         if (arg4) {
1524             if (arg5 != sizeof(target_sigset_t)) {
1525                 unlock_user(target_pfd, arg1, 0);
1526                 return -TARGET_EINVAL;
1527             }
1528 
1529             target_set = lock_user(VERIFY_READ, arg4,
1530                                    sizeof(target_sigset_t), 1);
1531             if (!target_set) {
1532                 unlock_user(target_pfd, arg1, 0);
1533                 return -TARGET_EFAULT;
1534             }
1535             target_to_host_sigset(set, target_set);
1536         } else {
1537             set = NULL;
1538         }
1539 
1540         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1541                                    set, SIGSET_T_SIZE));
1542 
1543         if (!is_error(ret) && arg3) {
1544             if (time64) {
1545                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1546                     return -TARGET_EFAULT;
1547                 }
1548             } else {
1549                 if (host_to_target_timespec(arg3, timeout_ts)) {
1550                     return -TARGET_EFAULT;
1551                 }
1552             }
1553         }
1554         if (arg4) {
1555             unlock_user(target_set, arg4, 0);
1556         }
1557     } else {
1558           struct timespec ts, *pts;
1559 
1560           if (arg3 >= 0) {
1561               /* Convert ms to secs, ns */
1562               ts.tv_sec = arg3 / 1000;
1563               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1564               pts = &ts;
1565           } else {
1566               /* -ve poll() timeout means "infinite" */
1567               pts = NULL;
1568           }
1569           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1570     }
1571 
1572     if (!is_error(ret)) {
1573         for (i = 0; i < nfds; i++) {
1574             target_pfd[i].revents = tswap16(pfd[i].revents);
1575         }
1576     }
1577     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1578     return ret;
1579 }
1580 #endif
1581 
1582 static abi_long do_pipe2(int host_pipe[], int flags)
1583 {
1584 #ifdef CONFIG_PIPE2
1585     return pipe2(host_pipe, flags);
1586 #else
1587     return -ENOSYS;
1588 #endif
1589 }
1590 
/*
 * Emulate pipe(2)/pipe2(2): create the host pipe, then deliver the two
 * descriptors to the guest either via target-specific registers (the
 * historical pipe() calling convention on some targets) or by writing
 * them to the guest array at 'pipedes'.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a4; first fd is the return value. */
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        /* Second fd goes in v1 (gpr[3]); first fd is the return value. */
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        /* Second fd goes in r1; first fd is the return value. */
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        /* Second fd goes in %o1; first fd is the return value. */
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest int array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1624 
1625 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1626                                               abi_ulong target_addr,
1627                                               socklen_t len)
1628 {
1629     struct target_ip_mreqn *target_smreqn;
1630 
1631     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1632     if (!target_smreqn)
1633         return -TARGET_EFAULT;
1634     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1635     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1636     if (len == sizeof(struct target_ip_mreqn))
1637         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1638     unlock_user(target_smreqn, target_addr, 0);
1639 
1640     return 0;
1641 }
1642 
1643 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1644                                                abi_ulong target_addr,
1645                                                socklen_t len)
1646 {
1647     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1648     sa_family_t sa_family;
1649     struct target_sockaddr *target_saddr;
1650 
1651     if (fd_trans_target_to_host_addr(fd)) {
1652         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1653     }
1654 
1655     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1656     if (!target_saddr)
1657         return -TARGET_EFAULT;
1658 
1659     sa_family = tswap16(target_saddr->sa_family);
1660 
1661     /* Oops. The caller might send a incomplete sun_path; sun_path
1662      * must be terminated by \0 (see the manual page), but
1663      * unfortunately it is quite common to specify sockaddr_un
1664      * length as "strlen(x->sun_path)" while it should be
1665      * "strlen(...) + 1". We'll fix that here if needed.
1666      * Linux kernel has a similar feature.
1667      */
1668 
1669     if (sa_family == AF_UNIX) {
1670         if (len < unix_maxlen && len > 0) {
1671             char *cp = (char*)target_saddr;
1672 
1673             if ( cp[len-1] && !cp[len] )
1674                 len++;
1675         }
1676         if (len > unix_maxlen)
1677             len = unix_maxlen;
1678     }
1679 
1680     memcpy(addr, target_saddr, len);
1681     addr->sa_family = sa_family;
1682     if (sa_family == AF_NETLINK) {
1683         struct sockaddr_nl *nladdr;
1684 
1685         nladdr = (struct sockaddr_nl *)addr;
1686         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1687         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1688     } else if (sa_family == AF_PACKET) {
1689 	struct target_sockaddr_ll *lladdr;
1690 
1691 	lladdr = (struct target_sockaddr_ll *)addr;
1692 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1693 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1694     }
1695     unlock_user(target_saddr, target_addr, 0);
1696 
1697     return 0;
1698 }
1699 
/*
 * Copy a host sockaddr out to guest memory, byte-swapping the family
 * and any family-specific fields the guest buffer is large enough to
 * hold.  A zero length is a no-op success (nothing to write back).
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    /* Only swap the family if the buffer is big enough to contain it. */
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    /* Family-specific fixups, each guarded by the buffer length. */
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1739 
/*
 * Convert ancillary data (control messages) from guest format in
 * target_msgh to host format in msgh, ready for a host sendmsg().
 *
 * Walks the guest cmsg chain and the host cmsg chain in lockstep,
 * swapping each header and converting the payload for the types we
 * understand (SCM_RIGHTS fd arrays and SCM_CREDENTIALS); anything
 * else is copied through unconverted with a LOG_UNIMP warning.
 *
 * On return msgh->msg_controllen holds the number of host bytes
 * actually produced.  Returns 0 on success or -TARGET_EFAULT if the
 * guest control buffer cannot be read.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;   /* host control bytes emitted so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Not even room for one guest header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest message, excluding its header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        /* Tentatively account for this message; back out on overflow. */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET may differ between target and host ABIs. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Array of file descriptors: swap each int individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Process credentials: swap pid/uid/gid field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: pass through unswapped, best effort. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    /* Read-only lock: nothing to copy back to the guest. */
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1825 
/*
 * Convert ancillary data received from a host recvmsg() in msgh into
 * guest format in target_msgh's control buffer.
 *
 * Mirrors target_to_host_cmsg in the opposite direction: the host and
 * guest cmsg chains are walked in lockstep, headers are swapped, and
 * payloads are converted per (level, type).  A guest buffer that is
 * too small is a guest problem: the data is truncated and MSG_CTRUNC
 * is set in the guest msg_flags, matching kernel put_cmsg() behaviour.
 *
 * On return target_msgh->msg_controllen holds the number of guest
 * bytes produced (byte-swapped).  Returns 0 on success or
 * -TARGET_EFAULT if the guest control buffer cannot be written.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;   /* guest control bytes emitted so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one guest header: emit nothing. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length, excluding the host header. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET may differ between host and target ABIs. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* Host struct timeval -> target_timeval may differ in size. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Guest buffer too small for the full payload: truncate and
         * report via MSG_CTRUNC, as the kernel would.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* File descriptor array: swap each int individually.
                 * Division by sizeof(int) drops any trailing partial fd.
                 */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Only whole, untruncated timevals are convertible. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                /* Process credentials: swap pid/uid/gid field by field. */
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                /* Single 32-bit value (received TTL). */
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error report: sock_extended_err followed by
                 * the offending peer's address, converted recursively.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* target_data is host-side scratch, so passing its host
                 * address as the "guest" address works here.
                 */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                /* Single 32-bit value (received hop limit). */
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 variant of the IP_RECVERR conversion above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: byte-copy what fits and zero any excess
             * destination space so the guest never sees stale bytes.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the (possibly truncated) message we just wrote;
         * tgt_space is clamped so msg_controllen never goes negative.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    /* Copy the converted control data back to guest memory. */
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2057 
2058 /* do_setsockopt() Must return target values and target errnos. */
2059 static abi_long do_setsockopt(int sockfd, int level, int optname,
2060                               abi_ulong optval_addr, socklen_t optlen)
2061 {
2062     abi_long ret;
2063     int val;
2064     struct ip_mreqn *ip_mreq;
2065     struct ip_mreq_source *ip_mreq_source;
2066 
2067     switch(level) {
2068     case SOL_TCP:
2069     case SOL_UDP:
2070         /* TCP and UDP options all take an 'int' value.  */
2071         if (optlen < sizeof(uint32_t))
2072             return -TARGET_EINVAL;
2073 
2074         if (get_user_u32(val, optval_addr))
2075             return -TARGET_EFAULT;
2076         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2077         break;
2078     case SOL_IP:
2079         switch(optname) {
2080         case IP_TOS:
2081         case IP_TTL:
2082         case IP_HDRINCL:
2083         case IP_ROUTER_ALERT:
2084         case IP_RECVOPTS:
2085         case IP_RETOPTS:
2086         case IP_PKTINFO:
2087         case IP_MTU_DISCOVER:
2088         case IP_RECVERR:
2089         case IP_RECVTTL:
2090         case IP_RECVTOS:
2091 #ifdef IP_FREEBIND
2092         case IP_FREEBIND:
2093 #endif
2094         case IP_MULTICAST_TTL:
2095         case IP_MULTICAST_LOOP:
2096             val = 0;
2097             if (optlen >= sizeof(uint32_t)) {
2098                 if (get_user_u32(val, optval_addr))
2099                     return -TARGET_EFAULT;
2100             } else if (optlen >= 1) {
2101                 if (get_user_u8(val, optval_addr))
2102                     return -TARGET_EFAULT;
2103             }
2104             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2105             break;
2106         case IP_ADD_MEMBERSHIP:
2107         case IP_DROP_MEMBERSHIP:
2108             if (optlen < sizeof (struct target_ip_mreq) ||
2109                 optlen > sizeof (struct target_ip_mreqn))
2110                 return -TARGET_EINVAL;
2111 
2112             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2113             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2114             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2115             break;
2116 
2117         case IP_BLOCK_SOURCE:
2118         case IP_UNBLOCK_SOURCE:
2119         case IP_ADD_SOURCE_MEMBERSHIP:
2120         case IP_DROP_SOURCE_MEMBERSHIP:
2121             if (optlen != sizeof (struct target_ip_mreq_source))
2122                 return -TARGET_EINVAL;
2123 
2124             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2125             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2126             unlock_user (ip_mreq_source, optval_addr, 0);
2127             break;
2128 
2129         default:
2130             goto unimplemented;
2131         }
2132         break;
2133     case SOL_IPV6:
2134         switch (optname) {
2135         case IPV6_MTU_DISCOVER:
2136         case IPV6_MTU:
2137         case IPV6_V6ONLY:
2138         case IPV6_RECVPKTINFO:
2139         case IPV6_UNICAST_HOPS:
2140         case IPV6_MULTICAST_HOPS:
2141         case IPV6_MULTICAST_LOOP:
2142         case IPV6_RECVERR:
2143         case IPV6_RECVHOPLIMIT:
2144         case IPV6_2292HOPLIMIT:
2145         case IPV6_CHECKSUM:
2146         case IPV6_ADDRFORM:
2147         case IPV6_2292PKTINFO:
2148         case IPV6_RECVTCLASS:
2149         case IPV6_RECVRTHDR:
2150         case IPV6_2292RTHDR:
2151         case IPV6_RECVHOPOPTS:
2152         case IPV6_2292HOPOPTS:
2153         case IPV6_RECVDSTOPTS:
2154         case IPV6_2292DSTOPTS:
2155         case IPV6_TCLASS:
2156         case IPV6_ADDR_PREFERENCES:
2157 #ifdef IPV6_RECVPATHMTU
2158         case IPV6_RECVPATHMTU:
2159 #endif
2160 #ifdef IPV6_TRANSPARENT
2161         case IPV6_TRANSPARENT:
2162 #endif
2163 #ifdef IPV6_FREEBIND
2164         case IPV6_FREEBIND:
2165 #endif
2166 #ifdef IPV6_RECVORIGDSTADDR
2167         case IPV6_RECVORIGDSTADDR:
2168 #endif
2169             val = 0;
2170             if (optlen < sizeof(uint32_t)) {
2171                 return -TARGET_EINVAL;
2172             }
2173             if (get_user_u32(val, optval_addr)) {
2174                 return -TARGET_EFAULT;
2175             }
2176             ret = get_errno(setsockopt(sockfd, level, optname,
2177                                        &val, sizeof(val)));
2178             break;
2179         case IPV6_PKTINFO:
2180         {
2181             struct in6_pktinfo pki;
2182 
2183             if (optlen < sizeof(pki)) {
2184                 return -TARGET_EINVAL;
2185             }
2186 
2187             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2188                 return -TARGET_EFAULT;
2189             }
2190 
2191             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2192 
2193             ret = get_errno(setsockopt(sockfd, level, optname,
2194                                        &pki, sizeof(pki)));
2195             break;
2196         }
2197         case IPV6_ADD_MEMBERSHIP:
2198         case IPV6_DROP_MEMBERSHIP:
2199         {
2200             struct ipv6_mreq ipv6mreq;
2201 
2202             if (optlen < sizeof(ipv6mreq)) {
2203                 return -TARGET_EINVAL;
2204             }
2205 
2206             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2207                 return -TARGET_EFAULT;
2208             }
2209 
2210             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2211 
2212             ret = get_errno(setsockopt(sockfd, level, optname,
2213                                        &ipv6mreq, sizeof(ipv6mreq)));
2214             break;
2215         }
2216         default:
2217             goto unimplemented;
2218         }
2219         break;
2220     case SOL_ICMPV6:
2221         switch (optname) {
2222         case ICMPV6_FILTER:
2223         {
2224             struct icmp6_filter icmp6f;
2225 
2226             if (optlen > sizeof(icmp6f)) {
2227                 optlen = sizeof(icmp6f);
2228             }
2229 
2230             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2231                 return -TARGET_EFAULT;
2232             }
2233 
2234             for (val = 0; val < 8; val++) {
2235                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2236             }
2237 
2238             ret = get_errno(setsockopt(sockfd, level, optname,
2239                                        &icmp6f, optlen));
2240             break;
2241         }
2242         default:
2243             goto unimplemented;
2244         }
2245         break;
2246     case SOL_RAW:
2247         switch (optname) {
2248         case ICMP_FILTER:
2249         case IPV6_CHECKSUM:
2250             /* those take an u32 value */
2251             if (optlen < sizeof(uint32_t)) {
2252                 return -TARGET_EINVAL;
2253             }
2254 
2255             if (get_user_u32(val, optval_addr)) {
2256                 return -TARGET_EFAULT;
2257             }
2258             ret = get_errno(setsockopt(sockfd, level, optname,
2259                                        &val, sizeof(val)));
2260             break;
2261 
2262         default:
2263             goto unimplemented;
2264         }
2265         break;
2266 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2267     case SOL_ALG:
2268         switch (optname) {
2269         case ALG_SET_KEY:
2270         {
2271             char *alg_key = g_malloc(optlen);
2272 
2273             if (!alg_key) {
2274                 return -TARGET_ENOMEM;
2275             }
2276             if (copy_from_user(alg_key, optval_addr, optlen)) {
2277                 g_free(alg_key);
2278                 return -TARGET_EFAULT;
2279             }
2280             ret = get_errno(setsockopt(sockfd, level, optname,
2281                                        alg_key, optlen));
2282             g_free(alg_key);
2283             break;
2284         }
2285         case ALG_SET_AEAD_AUTHSIZE:
2286         {
2287             ret = get_errno(setsockopt(sockfd, level, optname,
2288                                        NULL, optlen));
2289             break;
2290         }
2291         default:
2292             goto unimplemented;
2293         }
2294         break;
2295 #endif
2296     case TARGET_SOL_SOCKET:
2297         switch (optname) {
2298         case TARGET_SO_RCVTIMEO:
2299         {
2300                 struct timeval tv;
2301 
2302                 optname = SO_RCVTIMEO;
2303 
2304 set_timeout:
2305                 if (optlen != sizeof(struct target_timeval)) {
2306                     return -TARGET_EINVAL;
2307                 }
2308 
2309                 if (copy_from_user_timeval(&tv, optval_addr)) {
2310                     return -TARGET_EFAULT;
2311                 }
2312 
2313                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2314                                 &tv, sizeof(tv)));
2315                 return ret;
2316         }
2317         case TARGET_SO_SNDTIMEO:
2318                 optname = SO_SNDTIMEO;
2319                 goto set_timeout;
2320         case TARGET_SO_ATTACH_FILTER:
2321         {
2322                 struct target_sock_fprog *tfprog;
2323                 struct target_sock_filter *tfilter;
2324                 struct sock_fprog fprog;
2325                 struct sock_filter *filter;
2326                 int i;
2327 
2328                 if (optlen != sizeof(*tfprog)) {
2329                     return -TARGET_EINVAL;
2330                 }
2331                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2332                     return -TARGET_EFAULT;
2333                 }
2334                 if (!lock_user_struct(VERIFY_READ, tfilter,
2335                                       tswapal(tfprog->filter), 0)) {
2336                     unlock_user_struct(tfprog, optval_addr, 1);
2337                     return -TARGET_EFAULT;
2338                 }
2339 
2340                 fprog.len = tswap16(tfprog->len);
2341                 filter = g_try_new(struct sock_filter, fprog.len);
2342                 if (filter == NULL) {
2343                     unlock_user_struct(tfilter, tfprog->filter, 1);
2344                     unlock_user_struct(tfprog, optval_addr, 1);
2345                     return -TARGET_ENOMEM;
2346                 }
2347                 for (i = 0; i < fprog.len; i++) {
2348                     filter[i].code = tswap16(tfilter[i].code);
2349                     filter[i].jt = tfilter[i].jt;
2350                     filter[i].jf = tfilter[i].jf;
2351                     filter[i].k = tswap32(tfilter[i].k);
2352                 }
2353                 fprog.filter = filter;
2354 
2355                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2356                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2357                 g_free(filter);
2358 
2359                 unlock_user_struct(tfilter, tfprog->filter, 1);
2360                 unlock_user_struct(tfprog, optval_addr, 1);
2361                 return ret;
2362         }
2363 	case TARGET_SO_BINDTODEVICE:
2364 	{
2365 		char *dev_ifname, *addr_ifname;
2366 
2367 		if (optlen > IFNAMSIZ - 1) {
2368 		    optlen = IFNAMSIZ - 1;
2369 		}
2370 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2371 		if (!dev_ifname) {
2372 		    return -TARGET_EFAULT;
2373 		}
2374 		optname = SO_BINDTODEVICE;
2375 		addr_ifname = alloca(IFNAMSIZ);
2376 		memcpy(addr_ifname, dev_ifname, optlen);
2377 		addr_ifname[optlen] = 0;
2378 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2379                                            addr_ifname, optlen));
2380 		unlock_user (dev_ifname, optval_addr, 0);
2381 		return ret;
2382 	}
2383         case TARGET_SO_LINGER:
2384         {
2385                 struct linger lg;
2386                 struct target_linger *tlg;
2387 
2388                 if (optlen != sizeof(struct target_linger)) {
2389                     return -TARGET_EINVAL;
2390                 }
2391                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2392                     return -TARGET_EFAULT;
2393                 }
2394                 __get_user(lg.l_onoff, &tlg->l_onoff);
2395                 __get_user(lg.l_linger, &tlg->l_linger);
2396                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2397                                 &lg, sizeof(lg)));
2398                 unlock_user_struct(tlg, optval_addr, 0);
2399                 return ret;
2400         }
2401             /* Options with 'int' argument.  */
2402         case TARGET_SO_DEBUG:
2403 		optname = SO_DEBUG;
2404 		break;
2405         case TARGET_SO_REUSEADDR:
2406 		optname = SO_REUSEADDR;
2407 		break;
2408 #ifdef SO_REUSEPORT
2409         case TARGET_SO_REUSEPORT:
2410                 optname = SO_REUSEPORT;
2411                 break;
2412 #endif
2413         case TARGET_SO_TYPE:
2414 		optname = SO_TYPE;
2415 		break;
2416         case TARGET_SO_ERROR:
2417 		optname = SO_ERROR;
2418 		break;
2419         case TARGET_SO_DONTROUTE:
2420 		optname = SO_DONTROUTE;
2421 		break;
2422         case TARGET_SO_BROADCAST:
2423 		optname = SO_BROADCAST;
2424 		break;
2425         case TARGET_SO_SNDBUF:
2426 		optname = SO_SNDBUF;
2427 		break;
2428         case TARGET_SO_SNDBUFFORCE:
2429                 optname = SO_SNDBUFFORCE;
2430                 break;
2431         case TARGET_SO_RCVBUF:
2432 		optname = SO_RCVBUF;
2433 		break;
2434         case TARGET_SO_RCVBUFFORCE:
2435                 optname = SO_RCVBUFFORCE;
2436                 break;
2437         case TARGET_SO_KEEPALIVE:
2438 		optname = SO_KEEPALIVE;
2439 		break;
2440         case TARGET_SO_OOBINLINE:
2441 		optname = SO_OOBINLINE;
2442 		break;
2443         case TARGET_SO_NO_CHECK:
2444 		optname = SO_NO_CHECK;
2445 		break;
2446         case TARGET_SO_PRIORITY:
2447 		optname = SO_PRIORITY;
2448 		break;
2449 #ifdef SO_BSDCOMPAT
2450         case TARGET_SO_BSDCOMPAT:
2451 		optname = SO_BSDCOMPAT;
2452 		break;
2453 #endif
2454         case TARGET_SO_PASSCRED:
2455 		optname = SO_PASSCRED;
2456 		break;
2457         case TARGET_SO_PASSSEC:
2458                 optname = SO_PASSSEC;
2459                 break;
2460         case TARGET_SO_TIMESTAMP:
2461 		optname = SO_TIMESTAMP;
2462 		break;
2463         case TARGET_SO_RCVLOWAT:
2464 		optname = SO_RCVLOWAT;
2465 		break;
2466         default:
2467             goto unimplemented;
2468         }
2469 	if (optlen < sizeof(uint32_t))
2470             return -TARGET_EINVAL;
2471 
2472 	if (get_user_u32(val, optval_addr))
2473             return -TARGET_EFAULT;
2474 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2475         break;
2476 #ifdef SOL_NETLINK
2477     case SOL_NETLINK:
2478         switch (optname) {
2479         case NETLINK_PKTINFO:
2480         case NETLINK_ADD_MEMBERSHIP:
2481         case NETLINK_DROP_MEMBERSHIP:
2482         case NETLINK_BROADCAST_ERROR:
2483         case NETLINK_NO_ENOBUFS:
2484 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2485         case NETLINK_LISTEN_ALL_NSID:
2486         case NETLINK_CAP_ACK:
2487 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2488 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2489         case NETLINK_EXT_ACK:
2490 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2491 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2492         case NETLINK_GET_STRICT_CHK:
2493 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2494             break;
2495         default:
2496             goto unimplemented;
2497         }
2498         val = 0;
2499         if (optlen < sizeof(uint32_t)) {
2500             return -TARGET_EINVAL;
2501         }
2502         if (get_user_u32(val, optval_addr)) {
2503             return -TARGET_EFAULT;
2504         }
2505         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2506                                    sizeof(val)));
2507         break;
2508 #endif /* SOL_NETLINK */
2509     default:
2510     unimplemented:
2511         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2512                       level, optname);
2513         ret = -TARGET_ENOPROTOOPT;
2514     }
2515     return ret;
2516 }
2517 
/* do_getsockopt() Must return target values and target errnos.
 *
 * Translates a guest getsockopt(2) call: the (level, optname) pair is
 * mapped from TARGET_* to host values, the option is fetched from the
 * host kernel, and the result is byte-swapped / re-laid-out into the
 * guest's view at optval_addr, with the guest's optlen word updated.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

/* Shared tail for SO_RCVTIMEO and SO_SNDTIMEO: fetch a host struct
 * timeval and convert it to a guest struct target_timeval. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            /* Clamp the reported length to the guest structure size. */
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            /* struct ucred: translated field-by-field because the guest
             * layout (struct target_ucred) may differ in endianness. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* Returns an opaque security-context string; no byte
             * swapping needed, just copy through the guest buffer. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            /* struct linger: two ints, translated field-by-field. */
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            /* Unknown SOL_SOCKET options are passed through unchanged
             * and treated as plain ints. */
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            /* Socket type constants differ between host and target ABIs. */
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* The kernel allows a short read of an int option; mimic that by
         * writing a single byte when the guest asked for less than 4. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Like the kernel: small byte-range values may be returned
             * as a single byte when the guest buffer is short. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* Same short-read handling as the SOL_IP int options above. */
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            /* Netlink int options require the exact length, unlike the
             * tolerant handling for SOL_IP/SOL_IPV6 above. */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            /* Returns an array of uint32_t group bitmasks. */
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): returning here leaves 'results' locked —
             * looks like unlock_user should happen before this return;
             * confirm against upstream. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2935 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* Each shift is split into two half-width shifts: a single shift by
     * TARGET_LONG_BITS (or HOST_LONG_BITS) would be undefined behavior
     * when the shift count equals the operand width (64-bit targets or
     * hosts).  Two half shifts give the same result and, for 64-bit
     * words, correctly discard the high half entirely. */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
2952 
/* Build a host struct iovec array from a guest target_iovec array at
 * target_addr, locking each guest buffer into host memory.
 *
 * type: VERIFY_READ or VERIFY_WRITE (direction of the eventual I/O).
 * copy: passed through to lock_user (copy guest data in for reads).
 *
 * Returns the host vector, or NULL with errno set on failure.  A NULL
 * return with errno == 0 means count was 0 (not an error).  The result
 * must be released with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* errno 0 lets the caller distinguish this from a failure. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every buffer locked before entry i. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3040 
/* Release a host iovec previously built by lock_iovec(), unlocking each
 * guest buffer.  copy != 0 writes the (possibly modified) host data back
 * to guest memory; 0 discards it.  Safe on a NULL vec (count 0 case),
 * since the loop body is then never reached and g_free(NULL) is a no-op.
 */
static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
                         abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    int i;

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec) {
        for (i = 0; i < count; i++) {
            abi_ulong base = tswapal(target_vec[i].iov_base);
            abi_long len = tswapal(target_vec[i].iov_len);
            /* A negative guest length means lock_iovec bailed out at this
             * entry, so nothing beyond it was ever locked. */
            if (len < 0) {
                break;
            }
            unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
        }
        unlock_user(target_vec, target_addr, 0);
    }

    g_free(vec);
}
3063 
/* Convert a guest socket type (base type plus TARGET_SOCK_CLOEXEC /
 * TARGET_SOCK_NONBLOCK flags) into the host encoding, in place.
 *
 * Returns 0 on success, -TARGET_EINVAL if a requested flag cannot be
 * represented or emulated on this host.  A TARGET_SOCK_NONBLOCK request
 * without host SOCK_NONBLOCK is tolerated when O_NONBLOCK exists, since
 * sock_flags_fixup() emulates it via fcntl() afterwards.
 */
static inline int target_to_host_sock_type(int *type)
{
    int host_type = 0;
    int target_type = *type;

    switch (target_type & TARGET_SOCK_TYPE_MASK) {
    case TARGET_SOCK_DGRAM:
        host_type = SOCK_DGRAM;
        break;
    case TARGET_SOCK_STREAM:
        host_type = SOCK_STREAM;
        break;
    default:
        /* Other types share the same numbering on host and target. */
        host_type = target_type & TARGET_SOCK_TYPE_MASK;
        break;
    }
    if (target_type & TARGET_SOCK_CLOEXEC) {
#if defined(SOCK_CLOEXEC)
        host_type |= SOCK_CLOEXEC;
#else
        return -TARGET_EINVAL;
#endif
    }
    if (target_type & TARGET_SOCK_NONBLOCK) {
#if defined(SOCK_NONBLOCK)
        host_type |= SOCK_NONBLOCK;
#elif !defined(O_NONBLOCK)
        return -TARGET_EINVAL;
#endif
    }
    *type = host_type;
    return 0;
}
3097 
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, a guest request for non-blocking mode
 * is emulated by setting O_NONBLOCK with fcntl() on the freshly created
 * fd.  Returns fd on success; on failure the fd is closed and
 * -TARGET_EINVAL is returned (callers pass the result straight back to
 * the guest).
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Bug fix: previously a failed F_GETFL (-1) was OR'ed with
         * O_NONBLOCK and handed to F_SETFL, setting garbage flags.
         * Treat F_GETFL failure the same as F_SETFL failure. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3112 
/* do_socket() Must return target values and target errnos.
 *
 * Creates a host socket for the guest, converting the type flags,
 * restricting PF_NETLINK to the protocols we can translate, and
 * registering an fd translator for fds whose message payloads need
 * host/target conversion (packet sockets and netlink).
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a translator (or none needed) are
     * allowed through; anything else would hand the guest raw host
     * netlink messages it cannot parse. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet-socket protocols are ethertypes in network byte order on
     * the wire; swap the guest's 16-bit value for the host. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the PF_NETLINK check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3166 
3167 /* do_bind() Must return target values and target errnos. */
3168 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3169                         socklen_t addrlen)
3170 {
3171     void *addr;
3172     abi_long ret;
3173 
3174     if ((int)addrlen < 0) {
3175         return -TARGET_EINVAL;
3176     }
3177 
3178     addr = alloca(addrlen+1);
3179 
3180     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3181     if (ret)
3182         return ret;
3183 
3184     return get_errno(bind(sockfd, addr, addrlen));
3185 }
3186 
3187 /* do_connect() Must return target values and target errnos. */
3188 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3189                            socklen_t addrlen)
3190 {
3191     void *addr;
3192     abi_long ret;
3193 
3194     if ((int)addrlen < 0) {
3195         return -TARGET_EINVAL;
3196     }
3197 
3198     addr = alloca(addrlen+1);
3199 
3200     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3201     if (ret)
3202         return ret;
3203 
3204     return get_errno(safe_connect(sockfd, addr, addrlen));
3205 }
3206 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * guest msghdr.  Builds a host struct msghdr (name, control data and
 * iovec all converted), performs the host syscall, and for receives
 * converts the results back into the guest structure.
 *
 * send != 0 selects sendmsg, 0 selects recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest control length: converted host cmsgs may be
     * larger than the guest ones (different alignment/padding). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* The fd translator may rewrite the payload in place, so
             * hand it a private copy of the first iovec's data. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 is the deliberate bad-name marker set above;
                 * don't try to convert it back. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3313 
3314 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3315                                int flags, int send)
3316 {
3317     abi_long ret;
3318     struct target_msghdr *msgp;
3319 
3320     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3321                           msgp,
3322                           target_msg,
3323                           send ? 1 : 0)) {
3324         return -TARGET_EFAULT;
3325     }
3326     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3327     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3328     return ret;
3329 }
3330 
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked()
 * for each entry of the guest mmsghdr vector.
 *
 * Mirrors kernel semantics: if at least one datagram was transferred,
 * the count is returned and any later error is dropped; only a failure
 * on the very first datagram returns an error.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently caps vlen at UIO_MAXIOV; do the same. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Write back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3377 
/* do_accept4() Must return target values and target errnos.
 *
 * flags are target fcntl-style flags (e.g. SOCK_CLOEXEC/SOCK_NONBLOCK)
 * translated through fcntl_flags_tbl.  A NULL guest addr skips peer
 * address conversion entirely, as with the real syscall.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Verify writability up front so we can fail before blocking in
     * accept4. */
    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy back at most the guest-provided length, but report the
         * kernel's full length so the guest can detect truncation. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3417 
3418 /* do_getpeername() Must return target values and target errnos. */
3419 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3420                                abi_ulong target_addrlen_addr)
3421 {
3422     socklen_t addrlen, ret_addrlen;
3423     void *addr;
3424     abi_long ret;
3425 
3426     if (get_user_u32(addrlen, target_addrlen_addr))
3427         return -TARGET_EFAULT;
3428 
3429     if ((int)addrlen < 0) {
3430         return -TARGET_EINVAL;
3431     }
3432 
3433     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3434         return -TARGET_EFAULT;
3435     }
3436 
3437     addr = alloca(addrlen);
3438 
3439     ret_addrlen = addrlen;
3440     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3441     if (!is_error(ret)) {
3442         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3443         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3444             ret = -TARGET_EFAULT;
3445         }
3446     }
3447     return ret;
3448 }
3449 
3450 /* do_getsockname() Must return target values and target errnos. */
3451 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3452                                abi_ulong target_addrlen_addr)
3453 {
3454     socklen_t addrlen, ret_addrlen;
3455     void *addr;
3456     abi_long ret;
3457 
3458     if (get_user_u32(addrlen, target_addrlen_addr))
3459         return -TARGET_EFAULT;
3460 
3461     if ((int)addrlen < 0) {
3462         return -TARGET_EINVAL;
3463     }
3464 
3465     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3466         return -TARGET_EFAULT;
3467     }
3468 
3469     addr = alloca(addrlen);
3470 
3471     ret_addrlen = addrlen;
3472     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3473     if (!is_error(ret)) {
3474         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3475         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3476             ret = -TARGET_EFAULT;
3477         }
3478     }
3479     return ret;
3480 }
3481 
3482 /* do_socketpair() Must return target values and target errnos. */
3483 static abi_long do_socketpair(int domain, int type, int protocol,
3484                               abi_ulong target_tab_addr)
3485 {
3486     int tab[2];
3487     abi_long ret;
3488 
3489     target_to_host_sock_type(&type);
3490 
3491     ret = get_errno(socketpair(domain, type, protocol, tab));
3492     if (!is_error(ret)) {
3493         if (put_user_s32(tab[0], target_tab_addr)
3494             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3495             ret = -TARGET_EFAULT;
3496     }
3497     return ret;
3498 }
3499 
/*
 * do_sendto() Must return target values and target errnos.
 *
 * Emulates both send() (target_addr == 0) and sendto().  If the fd has a
 * registered data translator, the payload is copied into a scratch buffer
 * and translated before transmission; the original locked guest buffer is
 * restored before unlocking.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* non-NULL => host_msg is a g_malloc'd copy */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a private copy so guest memory is left untouched. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: presumably slack for NUL-terminating abstract AF_UNIX
         * names in target_to_host_sockaddr() -- TODO confirm. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we made a translated copy, free it and swap back the locked
     * guest buffer so unlock_user() releases the right pointer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3543 
/*
 * do_recvfrom() Must return target values and target errnos.
 *
 * Emulates both recv() (target_addr == 0) and recvfrom().  On success the
 * received data is translated (if the fd has a translator) and the peer
 * address plus its length are copied back to guest memory.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is legal (e.g. MSG_TRUNC probing). */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate only the bytes actually received. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit the received bytes to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
        /* NB: the fail label lives inside this else so that error paths
         * unlock without copying anything back (length 0). */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3604 
#ifdef TARGET_NR_socketcall
/*
 * do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: fetches the packed
 * argument vector from guest memory and forwards to the per-operation
 * helper.  Entries absent from nargs[] are zero-initialized, so an
 * unassigned opcode fetches no args and falls through to the default case.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3697 
#define N_SHM_REGIONS	32

/* Per-process table of guest shmat() attachments; presumably consulted by
 * the shmat/shmdt emulation elsewhere in this file -- verify against users. */
static struct shm_region {
    abi_ulong start;   /* guest address where the segment is attached */
    abi_ulong size;    /* size of the attached segment in bytes */
    bool in_use;       /* slot occupied */
} shm_regions[N_SHM_REGIONS];
3705 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct; used when the target does not
 * provide its own TARGET_SEMID64_DS layout. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;  /* pads the 32-bit time field to 64 bits */
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;  /* ditto for sem_ctime */
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3724 
/*
 * Copy the sem_perm member of a guest semid64_ds at target_addr into a
 * host struct ipc_perm, byte-swapping as needed.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode is 32 bits wide on some targets, 16 on the rest */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
    /* likewise for the sequence number */
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3752 
/*
 * Reverse of target_to_host_ipc_perm(): write a host struct ipc_perm into
 * the sem_perm member of the guest semid64_ds at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode is 32 bits wide on some targets, 16 on the rest */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
    /* likewise for the sequence number */
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3780 
3781 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3782                                                abi_ulong target_addr)
3783 {
3784     struct target_semid64_ds *target_sd;
3785 
3786     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3787         return -TARGET_EFAULT;
3788     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3789         return -TARGET_EFAULT;
3790     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3791     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3792     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3793     unlock_user_struct(target_sd, target_addr, 0);
3794     return 0;
3795 }
3796 
3797 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3798                                                struct semid_ds *host_sd)
3799 {
3800     struct target_semid64_ds *target_sd;
3801 
3802     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3803         return -TARGET_EFAULT;
3804     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3805         return -TARGET_EFAULT;
3806     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3807     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3808     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3809     unlock_user_struct(target_sd, target_addr, 1);
3810     return 0;
3811 }
3812 
/* Guest-layout mirror of the host's struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3825 
3826 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3827                                               struct seminfo *host_seminfo)
3828 {
3829     struct target_seminfo *target_seminfo;
3830     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3831         return -TARGET_EFAULT;
3832     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3833     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3834     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3835     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3836     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3837     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3838     __put_user(host_seminfo->semume, &target_seminfo->semume);
3839     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3840     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3841     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3842     unlock_user_struct(target_seminfo, target_addr, 1);
3843     return 0;
3844 }
3845 
/* Host-side view of the semctl() fourth argument (see semctl(2)). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side view of the same union: pointers become guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3859 
/*
 * Read the guest's semaphore value array (for semctl SETALL) into a newly
 * allocated host array.  The element count is obtained from the kernel via
 * IPC_STAT.  On success *host_array is owned by the caller; it is normally
 * released later by host_to_target_semarray().
 * Returns 0, a negative host-converted errno, -TARGET_ENOMEM, or
 * -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores the set holds. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3895 
3896 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3897                                                unsigned short **host_array)
3898 {
3899     int nsems;
3900     unsigned short *array;
3901     union semun semun;
3902     struct semid_ds semid_ds;
3903     int i, ret;
3904 
3905     semun.buf = &semid_ds;
3906 
3907     ret = semctl(semid, 0, IPC_STAT, semun);
3908     if (ret == -1)
3909         return get_errno(ret);
3910 
3911     nsems = semid_ds.sem_nsems;
3912 
3913     array = lock_user(VERIFY_WRITE, target_addr,
3914                       nsems*sizeof(unsigned short), 0);
3915     if (!array)
3916         return -TARGET_EFAULT;
3917 
3918     for(i=0; i<nsems; i++) {
3919         __put_user((*host_array)[i], &array[i]);
3920     }
3921     g_free(*host_array);
3922     unlock_user(array, target_addr, 1);
3923 
3924     return 0;
3925 }
3926 
/*
 * Emulate semctl(2).  Must return target values and target errnos.
 * The command is masked with 0xff to strip flag bits such as IPC_64.
 * Unknown commands fall out of the switch and return -TARGET_EINVAL.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
             * a swap of the 4 byte val field. In other cases, the data is
             * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* array is allocated by the first helper and freed by the second */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* these commands ignore the value argument */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
3996 
/* Guest-layout mirror of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4002 
4003 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4004                                              abi_ulong target_addr,
4005                                              unsigned nsops)
4006 {
4007     struct target_sembuf *target_sembuf;
4008     int i;
4009 
4010     target_sembuf = lock_user(VERIFY_READ, target_addr,
4011                               nsops*sizeof(struct target_sembuf), 1);
4012     if (!target_sembuf)
4013         return -TARGET_EFAULT;
4014 
4015     for(i=0; i<nsops; i++) {
4016         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4017         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4018         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4019     }
4020 
4021     unlock_user(target_sembuf, target_addr, 0);
4022 
4023     return 0;
4024 }
4025 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop().  A zero timeout address means block
 * indefinitely (plain semop semantics).  time64 selects which guest
 * timespec layout to read.  Returns target values and target errnos.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's SEMOPM limit before allocating. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Hosts without a direct semtimedop syscall go through sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4088 
/* Guest-layout mirror of struct msqid64_ds (asm-generic layout). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;  /* pads each 32-bit time field to 64 bits */
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4112 
4113 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4114                                                abi_ulong target_addr)
4115 {
4116     struct target_msqid_ds *target_md;
4117 
4118     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4119         return -TARGET_EFAULT;
4120     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4121         return -TARGET_EFAULT;
4122     host_md->msg_stime = tswapal(target_md->msg_stime);
4123     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4124     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4125     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4126     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4127     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4128     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4129     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4130     unlock_user_struct(target_md, target_addr, 0);
4131     return 0;
4132 }
4133 
4134 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4135                                                struct msqid_ds *host_md)
4136 {
4137     struct target_msqid_ds *target_md;
4138 
4139     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4140         return -TARGET_EFAULT;
4141     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4142         return -TARGET_EFAULT;
4143     target_md->msg_stime = tswapal(host_md->msg_stime);
4144     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4145     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4146     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4147     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4148     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4149     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4150     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4151     unlock_user_struct(target_md, target_addr, 1);
4152     return 0;
4153 }
4154 
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4165 
4166 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4167                                               struct msginfo *host_msginfo)
4168 {
4169     struct target_msginfo *target_msginfo;
4170     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4173     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4174     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4175     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4176     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4177     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4178     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4179     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4180     unlock_user_struct(target_msginfo, target_addr, 1);
4181     return 0;
4182 }
4183 
/*
 * Emulate msgctl(2).  Must return target values and target errnos.
 * The command is masked with 0xff to strip flag bits such as IPC_64.
 * Unknown commands fall out of the switch and return -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* the kernel overloads the msqid_ds pointer for these commands */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4215 
/* Guest-layout mirror of struct msgbuf; mtext is variable length. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4220 
/*
 * Emulate msgsnd(2).  Must return target values and target errnos.
 * Copies the guest message into a host msgbuf (mtype byte-swapped) and
 * sends it via the native syscall, falling back to sys_ipc on hosts
 * that only provide the multiplexed entry point.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* host msgbuf = long mtype followed by msgsz bytes of payload */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* s390x sys_ipc takes only five arguments */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4261 
#ifdef __NR_ipc
/* Helper to build the final sys_ipc arguments for IPCOP_msgrcv; the
 * historical ABI packs msgp and msgtyp into a two-element array on most
 * hosts ("the kludge"), with SPARC and s390x deviating. */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4275 
/*
 * Emulate msgrcv(2).  Must return target values and target errnos.
 * Receives into a host msgbuf, then copies the payload (ret bytes) and
 * the byte-swapped mtype back to the guest message buffer.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* fall back to the multiplexed sys_ipc entry point if needed */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret is the number of payload bytes received */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* NOTE(review): target_mb is always non-NULL when we reach this
     * label (a failed lock returns early above), so this guard looks
     * redundant -- kept as-is. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4327 
4328 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4329                                                abi_ulong target_addr)
4330 {
4331     struct target_shmid_ds *target_sd;
4332 
4333     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4334         return -TARGET_EFAULT;
4335     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4336         return -TARGET_EFAULT;
4337     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4338     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4339     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4340     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4341     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4342     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4343     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4344     unlock_user_struct(target_sd, target_addr, 0);
4345     return 0;
4346 }
4347 
4348 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4349                                                struct shmid_ds *host_sd)
4350 {
4351     struct target_shmid_ds *target_sd;
4352 
4353     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4354         return -TARGET_EFAULT;
4355     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4356         return -TARGET_EFAULT;
4357     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4358     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4359     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4360     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4361     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4362     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4363     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4364     unlock_user_struct(target_sd, target_addr, 1);
4365     return 0;
4366 }
4367 
/* Guest-ABI image of the kernel's struct shminfo (returned by shmctl
 * IPC_INFO); every field is abi_ulong so __put_user handles width and
 * byte order for the target. */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4375 
4376 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4377                                               struct shminfo *host_shminfo)
4378 {
4379     struct target_shminfo *target_shminfo;
4380     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4381         return -TARGET_EFAULT;
4382     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4383     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4384     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4385     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4386     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4387     unlock_user_struct(target_shminfo, target_addr, 1);
4388     return 0;
4389 }
4390 
/* Guest-ABI image of the kernel's struct shm_info (returned by shmctl
 * SHM_INFO); used_ids stays a plain int to match the kernel layout. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4399 
4400 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4401                                                struct shm_info *host_shm_info)
4402 {
4403     struct target_shm_info *target_shm_info;
4404     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4405         return -TARGET_EFAULT;
4406     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4407     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4408     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4409     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4410     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4411     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4412     unlock_user_struct(target_shm_info, target_addr, 1);
4413     return 0;
4414 }
4415 
/*
 * Emulate shmctl(2): convert the guest buffer (if any) to host layout,
 * issue the host shmctl(), and convert the result back.  Returns the
 * host result or a target errno.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip modifier bits (e.g. IPC_64); only the base command is used. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* NOTE(review): the buffer is converted in both directions
         * unconditionally -- even for read-only (IPC_STAT) or write-only
         * (IPC_SET) commands, and even when shmctl() failed.  Confirm
         * guests tolerate the extra write-back before tightening this. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The cast only satisfies shmctl()'s prototype; the kernel
         * actually fills a struct shminfo for IPC_INFO. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Same prototype trick: SHM_INFO fills a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no buffer argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4454 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for the guest: one target page.  cpu_env is unused here
 * but kept so TARGET_FORCE_SHMLBA overrides can consult CPU state. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4474 
/*
 * Emulate shmat(2): attach the SysV shared memory segment shmid at
 * (optionally rounded) guest address shmaddr, update QEMU's page flags
 * and the shm_regions[] bookkeeping, and return the guest address of
 * the mapping or a target errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Round a misaligned address down if SHM_RND, else reject it,
     * mirroring the kernel's SHMLBA handling. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the placeholder reservation found above. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Mark the attached range readable (and writable unless SHM_RDONLY). */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Remember start/size so do_shmdt() can clear the page flags later.
     * If the table is full the attach still succeeds, just untracked. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4559 
4560 static inline abi_long do_shmdt(abi_ulong shmaddr)
4561 {
4562     int i;
4563     abi_long rv;
4564 
4565     /* shmdt pointers are always untagged */
4566 
4567     mmap_lock();
4568 
4569     for (i = 0; i < N_SHM_REGIONS; ++i) {
4570         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4571             shm_regions[i].in_use = false;
4572             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4573             break;
4574         }
4575     }
4576     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4577 
4578     mmap_unlock();
4579 
4580     return rv;
4581 }
4582 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/* Demultiplexer for the legacy sys_ipc syscall: the low 16 bits of
 * 'call' select the IPC operation, the high 16 bits carry the ABI
 * "version" used by some sub-calls. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        /* Plain semop is semtimedop with no timeout. */
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        /* NOTE(review): get_user_ual's failure is not checked here; atptr
         * handling on fault falls to do_semctl -- confirm intended. */
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv passes msgp and msgtyp indirectly via a
                 * kludge struct in guest memory (matches the kernel's
                 * historical ipc_kludge). */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* Attach address is returned indirectly through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4703 
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum of STRUCT_* ids,
 * one per structure description (STRUCT_MAX is the count). */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk argtype descriptor table
 * struct_<name>_def[] for each structure; STRUCT_SPECIAL entries get
 * their tables defined by hand elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed scratch buffer used when thunking ioctl arguments. */
#define MAX_STRUCT_SIZE 4096
4722 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Thunk the FS_IOC_FIEMAP ioctl: convert the guest's struct fiemap to
 * host layout, run the ioctl, then convert the header and any returned
 * fiemap_extent array back to the guest.  Returns host result or a
 * target errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject guest-controlled extent counts that would overflow the
     * outbufsz computation below. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4811 
4812 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4813                                 int fd, int cmd, abi_long arg)
4814 {
4815     const argtype *arg_type = ie->arg_type;
4816     int target_size;
4817     void *argptr;
4818     int ret;
4819     struct ifconf *host_ifconf;
4820     uint32_t outbufsz;
4821     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4822     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4823     int target_ifreq_size;
4824     int nb_ifreq;
4825     int free_buf = 0;
4826     int i;
4827     int target_ifc_len;
4828     abi_long target_ifc_buf;
4829     int host_ifc_len;
4830     char *host_ifc_buf;
4831 
4832     assert(arg_type[0] == TYPE_PTR);
4833     assert(ie->access == IOC_RW);
4834 
4835     arg_type++;
4836     target_size = thunk_type_size(arg_type, 0);
4837 
4838     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4839     if (!argptr)
4840         return -TARGET_EFAULT;
4841     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4842     unlock_user(argptr, arg, 0);
4843 
4844     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4845     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4846     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4847 
4848     if (target_ifc_buf != 0) {
4849         target_ifc_len = host_ifconf->ifc_len;
4850         nb_ifreq = target_ifc_len / target_ifreq_size;
4851         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4852 
4853         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4854         if (outbufsz > MAX_STRUCT_SIZE) {
4855             /*
4856              * We can't fit all the extents into the fixed size buffer.
4857              * Allocate one that is large enough and use it instead.
4858              */
4859             host_ifconf = malloc(outbufsz);
4860             if (!host_ifconf) {
4861                 return -TARGET_ENOMEM;
4862             }
4863             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4864             free_buf = 1;
4865         }
4866         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4867 
4868         host_ifconf->ifc_len = host_ifc_len;
4869     } else {
4870       host_ifc_buf = NULL;
4871     }
4872     host_ifconf->ifc_buf = host_ifc_buf;
4873 
4874     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4875     if (!is_error(ret)) {
4876 	/* convert host ifc_len to target ifc_len */
4877 
4878         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4879         target_ifc_len = nb_ifreq * target_ifreq_size;
4880         host_ifconf->ifc_len = target_ifc_len;
4881 
4882 	/* restore target ifc_buf */
4883 
4884         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4885 
4886 	/* copy struct ifconf to target user */
4887 
4888         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4889         if (!argptr)
4890             return -TARGET_EFAULT;
4891         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4892         unlock_user(argptr, arg, target_size);
4893 
4894         if (target_ifc_buf != 0) {
4895             /* copy ifreq[] to target user */
4896             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4897             for (i = 0; i < nb_ifreq ; i++) {
4898                 thunk_convert(argptr + i * target_ifreq_size,
4899                               host_ifc_buf + i * sizeof(struct ifreq),
4900                               ifreq_arg_type, THUNK_TARGET);
4901             }
4902             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4903         }
4904     }
4905 
4906     if (free_buf) {
4907         free(host_ifconf);
4908     }
4909 
4910     return ret;
4911 }
4912 
4913 #if defined(CONFIG_USBFS)
4914 #if HOST_LONG_BITS > 64
4915 #error USBDEVFS thunks do not support >64 bit hosts yet.
4916 #endif
/* Book-keeping wrapper pairing a host usbdevfs_urb with the guest URB it
 * shadows.  host_urb must remain embedded (not pointed-to) so the reapurb
 * path can recover this wrapper from the kernel-returned urb pointer via
 * offsetof().  target_urb_adr is the first member because the hash table
 * keys on the leading 64 bits (g_int64_hash). */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
4923 
4924 static GHashTable *usbdevfs_urb_hashtable(void)
4925 {
4926     static GHashTable *urb_hashtable;
4927 
4928     if (!urb_hashtable) {
4929         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4930     }
4931     return urb_hashtable;
4932 }
4933 
/* Track a submitted URB; the live_urb serves as both key and value. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4939 
/* Find the live_urb for a guest URB address, or NULL if unknown. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    return g_hash_table_lookup(usbdevfs_urb_hashtable(), &target_urb_adr);
}
4945 
/* Stop tracking a URB (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4951 
/*
 * Thunk USBDEVFS_REAPURB(NDELAY): reap a completed URB from the kernel,
 * recover the live_urb wrapper from the returned host urb pointer, copy
 * the completed URB and its data buffer back to the guest, and write the
 * guest URB address into *arg.  Frees the live_urb on all paths past a
 * successful reap.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host urb pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* The kernel returned &lurb->host_urb; step back to the wrapper. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer locked at submit time, copying the
     * full buffer_length back (device may have written into it). */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5011 
5012 static abi_long
5013 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5014                              uint8_t *buf_temp __attribute__((unused)),
5015                              int fd, int cmd, abi_long arg)
5016 {
5017     struct live_urb *lurb;
5018 
5019     /* map target address back to host URB with metadata. */
5020     lurb = urb_hashtable_lookup(arg);
5021     if (!lurb) {
5022         return -TARGET_EFAULT;
5023     }
5024     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5025 }
5026 
/*
 * Thunk USBDEVFS_SUBMITURB: build a host URB (plus tracking metadata)
 * from the guest URB at arg, lock the guest data buffer for the duration
 * of the transfer, submit it, and register it so reap/discard can find
 * it later.  The guest buffer stays locked until the URB is reaped.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest URB address (hash key) and guest buffer address
     * so reapurb can write results back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submit failed: drop the buffer lock (copy=0, nothing written)
         * and discard the tracking record. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* In flight: keep the buffer locked until reapurb completes it. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5087 #endif /* CONFIG_USBFS */
5088 
5089 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5090                             int cmd, abi_long arg)
5091 {
5092     void *argptr;
5093     struct dm_ioctl *host_dm;
5094     abi_long guest_data;
5095     uint32_t guest_data_size;
5096     int target_size;
5097     const argtype *arg_type = ie->arg_type;
5098     abi_long ret;
5099     void *big_buf = NULL;
5100     char *host_data;
5101 
5102     arg_type++;
5103     target_size = thunk_type_size(arg_type, 0);
5104     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5105     if (!argptr) {
5106         ret = -TARGET_EFAULT;
5107         goto out;
5108     }
5109     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5110     unlock_user(argptr, arg, 0);
5111 
5112     /* buf_temp is too small, so fetch things into a bigger buffer */
5113     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5114     memcpy(big_buf, buf_temp, target_size);
5115     buf_temp = big_buf;
5116     host_dm = big_buf;
5117 
5118     guest_data = arg + host_dm->data_start;
5119     if ((guest_data - arg) < 0) {
5120         ret = -TARGET_EINVAL;
5121         goto out;
5122     }
5123     guest_data_size = host_dm->data_size - host_dm->data_start;
5124     host_data = (char*)host_dm + host_dm->data_start;
5125 
5126     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5127     if (!argptr) {
5128         ret = -TARGET_EFAULT;
5129         goto out;
5130     }
5131 
5132     switch (ie->host_cmd) {
5133     case DM_REMOVE_ALL:
5134     case DM_LIST_DEVICES:
5135     case DM_DEV_CREATE:
5136     case DM_DEV_REMOVE:
5137     case DM_DEV_SUSPEND:
5138     case DM_DEV_STATUS:
5139     case DM_DEV_WAIT:
5140     case DM_TABLE_STATUS:
5141     case DM_TABLE_CLEAR:
5142     case DM_TABLE_DEPS:
5143     case DM_LIST_VERSIONS:
5144         /* no input data */
5145         break;
5146     case DM_DEV_RENAME:
5147     case DM_DEV_SET_GEOMETRY:
5148         /* data contains only strings */
5149         memcpy(host_data, argptr, guest_data_size);
5150         break;
5151     case DM_TARGET_MSG:
5152         memcpy(host_data, argptr, guest_data_size);
5153         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5154         break;
5155     case DM_TABLE_LOAD:
5156     {
5157         void *gspec = argptr;
5158         void *cur_data = host_data;
5159         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5160         int spec_size = thunk_type_size(arg_type, 0);
5161         int i;
5162 
5163         for (i = 0; i < host_dm->target_count; i++) {
5164             struct dm_target_spec *spec = cur_data;
5165             uint32_t next;
5166             int slen;
5167 
5168             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5169             slen = strlen((char*)gspec + spec_size) + 1;
5170             next = spec->next;
5171             spec->next = sizeof(*spec) + slen;
5172             strcpy((char*)&spec[1], gspec + spec_size);
5173             gspec += next;
5174             cur_data += spec->next;
5175         }
5176         break;
5177     }
5178     default:
5179         ret = -TARGET_EINVAL;
5180         unlock_user(argptr, guest_data, 0);
5181         goto out;
5182     }
5183     unlock_user(argptr, guest_data, 0);
5184 
5185     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5186     if (!is_error(ret)) {
5187         guest_data = arg + host_dm->data_start;
5188         guest_data_size = host_dm->data_size - host_dm->data_start;
5189         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5190         switch (ie->host_cmd) {
5191         case DM_REMOVE_ALL:
5192         case DM_DEV_CREATE:
5193         case DM_DEV_REMOVE:
5194         case DM_DEV_RENAME:
5195         case DM_DEV_SUSPEND:
5196         case DM_DEV_STATUS:
5197         case DM_TABLE_LOAD:
5198         case DM_TABLE_CLEAR:
5199         case DM_TARGET_MSG:
5200         case DM_DEV_SET_GEOMETRY:
5201             /* no return data */
5202             break;
5203         case DM_LIST_DEVICES:
5204         {
5205             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5206             uint32_t remaining_data = guest_data_size;
5207             void *cur_data = argptr;
5208             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5209             int nl_size = 12; /* can't use thunk_size due to alignment */
5210 
5211             while (1) {
5212                 uint32_t next = nl->next;
5213                 if (next) {
5214                     nl->next = nl_size + (strlen(nl->name) + 1);
5215                 }
5216                 if (remaining_data < nl->next) {
5217                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5218                     break;
5219                 }
5220                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5221                 strcpy(cur_data + nl_size, nl->name);
5222                 cur_data += nl->next;
5223                 remaining_data -= nl->next;
5224                 if (!next) {
5225                     break;
5226                 }
5227                 nl = (void*)nl + next;
5228             }
5229             break;
5230         }
5231         case DM_DEV_WAIT:
5232         case DM_TABLE_STATUS:
5233         {
5234             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5235             void *cur_data = argptr;
5236             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5237             int spec_size = thunk_type_size(arg_type, 0);
5238             int i;
5239 
5240             for (i = 0; i < host_dm->target_count; i++) {
5241                 uint32_t next = spec->next;
5242                 int slen = strlen((char*)&spec[1]) + 1;
5243                 spec->next = (cur_data - argptr) + spec_size + slen;
5244                 if (guest_data_size < spec->next) {
5245                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5246                     break;
5247                 }
5248                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5249                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5250                 cur_data = argptr + spec->next;
5251                 spec = (void*)host_dm + host_dm->data_start + next;
5252             }
5253             break;
5254         }
5255         case DM_TABLE_DEPS:
5256         {
5257             void *hdata = (void*)host_dm + host_dm->data_start;
5258             int count = *(uint32_t*)hdata;
5259             uint64_t *hdev = hdata + 8;
5260             uint64_t *gdev = argptr + 8;
5261             int i;
5262 
5263             *(uint32_t*)argptr = tswap32(count);
5264             for (i = 0; i < count; i++) {
5265                 *gdev = tswap64(*hdev);
5266                 gdev++;
5267                 hdev++;
5268             }
5269             break;
5270         }
5271         case DM_LIST_VERSIONS:
5272         {
5273             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5274             uint32_t remaining_data = guest_data_size;
5275             void *cur_data = argptr;
5276             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5277             int vers_size = thunk_type_size(arg_type, 0);
5278 
5279             while (1) {
5280                 uint32_t next = vers->next;
5281                 if (next) {
5282                     vers->next = vers_size + (strlen(vers->name) + 1);
5283                 }
5284                 if (remaining_data < vers->next) {
5285                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5286                     break;
5287                 }
5288                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5289                 strcpy(cur_data + vers_size, vers->name);
5290                 cur_data += vers->next;
5291                 remaining_data -= vers->next;
5292                 if (!next) {
5293                     break;
5294                 }
5295                 vers = (void*)vers + next;
5296             }
5297             break;
5298         }
5299         default:
5300             unlock_user(argptr, guest_data, 0);
5301             ret = -TARGET_EINVAL;
5302             goto out;
5303         }
5304         unlock_user(argptr, guest_data, guest_data_size);
5305 
5306         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5307         if (!argptr) {
5308             ret = -TARGET_EFAULT;
5309             goto out;
5310         }
5311         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5312         unlock_user(argptr, arg, target_size);
5313     }
5314 out:
5315     g_free(big_buf);
5316     return ret;
5317 }
5318 
/*
 * BLKPG is a two-level ioctl: the guest's struct blkpg_ioctl_arg embeds a
 * pointer to a struct blkpg_partition payload, so both structures must be
 * converted to host layout before the host ioctl can be issued.
 * Returns the host ioctl result, -TARGET_EFAULT on inaccessible guest
 * memory, or -TARGET_EINVAL for an unrecognised blkpg opcode.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert the outer blkpg_ioctl_arg into buf_temp. */
    arg_type++;                 /* skip the TYPE_PTR marker */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data; after conversion it holds the guest
     * address of the struct blkpg_partition payload. */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5371 
/*
 * Handler for write-only ioctls whose argument is a struct rtentry.
 * rtentry embeds a rt_dev string pointer, so the generic thunk conversion
 * cannot be used wholesale: the struct is converted field by field and the
 * rt_dev guest string is locked as a host pointer for the ioctl's duration.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This handler only supports IOC_W pointer-to-rtentry entries. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: replace the guest pointer with a locked host string
             * (or 0 when the guest passed a NULL device name). */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* rtentry always contains a rt_dev field, so the loop must have set
     * both pointers before we get here. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* NOTE(review): the lock above used tswapal(*target_rt_dev_ptr) but
         * the unlock passes the raw value — verify on cross-endian hosts. */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5437 
5438 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5439                                      int fd, int cmd, abi_long arg)
5440 {
5441     int sig = target_to_host_signal(arg);
5442     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5443 }
5444 
5445 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5446                                     int fd, int cmd, abi_long arg)
5447 {
5448     struct timeval tv;
5449     abi_long ret;
5450 
5451     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5452     if (is_error(ret)) {
5453         return ret;
5454     }
5455 
5456     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5457         if (copy_to_user_timeval(arg, &tv)) {
5458             return -TARGET_EFAULT;
5459         }
5460     } else {
5461         if (copy_to_user_timeval64(arg, &tv)) {
5462             return -TARGET_EFAULT;
5463         }
5464     }
5465 
5466     return ret;
5467 }
5468 
5469 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5470                                       int fd, int cmd, abi_long arg)
5471 {
5472     struct timespec ts;
5473     abi_long ret;
5474 
5475     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5476     if (is_error(ret)) {
5477         return ret;
5478     }
5479 
5480     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5481         if (host_to_target_timespec(arg, &ts)) {
5482             return -TARGET_EFAULT;
5483         }
5484     } else{
5485         if (host_to_target_timespec64(arg, &ts)) {
5486             return -TARGET_EFAULT;
5487         }
5488     }
5489 
5490     return ret;
5491 }
5492 
5493 #ifdef TIOCGPTPEER
5494 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5495                                      int fd, int cmd, abi_long arg)
5496 {
5497     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5498     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5499 }
5500 #endif
5501 
5502 #ifdef HAVE_DRM_H
5503 
5504 static void unlock_drm_version(struct drm_version *host_ver,
5505                                struct target_drm_version *target_ver,
5506                                bool copy)
5507 {
5508     unlock_user(host_ver->name, target_ver->name,
5509                                 copy ? host_ver->name_len : 0);
5510     unlock_user(host_ver->date, target_ver->date,
5511                                 copy ? host_ver->date_len : 0);
5512     unlock_user(host_ver->desc, target_ver->desc,
5513                                 copy ? host_ver->desc_len : 0);
5514 }
5515 
5516 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5517                                           struct target_drm_version *target_ver)
5518 {
5519     memset(host_ver, 0, sizeof(*host_ver));
5520 
5521     __get_user(host_ver->name_len, &target_ver->name_len);
5522     if (host_ver->name_len) {
5523         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5524                                    target_ver->name_len, 0);
5525         if (!host_ver->name) {
5526             return -EFAULT;
5527         }
5528     }
5529 
5530     __get_user(host_ver->date_len, &target_ver->date_len);
5531     if (host_ver->date_len) {
5532         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5533                                    target_ver->date_len, 0);
5534         if (!host_ver->date) {
5535             goto err;
5536         }
5537     }
5538 
5539     __get_user(host_ver->desc_len, &target_ver->desc_len);
5540     if (host_ver->desc_len) {
5541         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5542                                    target_ver->desc_len, 0);
5543         if (!host_ver->desc) {
5544             goto err;
5545         }
5546     }
5547 
5548     return 0;
5549 err:
5550     unlock_drm_version(host_ver, target_ver, false);
5551     return -EFAULT;
5552 }
5553 
/*
 * Copy the result of a successful DRM_IOCTL_VERSION back to the guest:
 * byte-swap the scalar fields into *target_ver, then unlock the three
 * string buffers, copying their host contents back to guest memory
 * (copy == true).
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5566 
5567 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5568                              int fd, int cmd, abi_long arg)
5569 {
5570     struct drm_version *ver;
5571     struct target_drm_version *target_ver;
5572     abi_long ret;
5573 
5574     switch (ie->host_cmd) {
5575     case DRM_IOCTL_VERSION:
5576         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5577             return -TARGET_EFAULT;
5578         }
5579         ver = (struct drm_version *)buf_temp;
5580         ret = target_to_host_drmversion(ver, target_ver);
5581         if (!is_error(ret)) {
5582             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5583             if (is_error(ret)) {
5584                 unlock_drm_version(ver, target_ver, false);
5585             } else {
5586                 host_to_target_drmversion(target_ver, ver);
5587             }
5588         }
5589         unlock_user_struct(target_ver, arg, 0);
5590         return ret;
5591     }
5592     return -TARGET_ENOSYS;
5593 }
5594 
5595 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5596                                            struct drm_i915_getparam *gparam,
5597                                            int fd, abi_long arg)
5598 {
5599     abi_long ret;
5600     int value;
5601     struct target_drm_i915_getparam *target_gparam;
5602 
5603     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5604         return -TARGET_EFAULT;
5605     }
5606 
5607     __get_user(gparam->param, &target_gparam->param);
5608     gparam->value = &value;
5609     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5610     put_user_s32(value, target_gparam->value);
5611 
5612     unlock_user_struct(target_gparam, arg, 0);
5613     return ret;
5614 }
5615 
5616 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5617                                   int fd, int cmd, abi_long arg)
5618 {
5619     switch (ie->host_cmd) {
5620     case DRM_IOCTL_I915_GETPARAM:
5621         return do_ioctl_drm_i915_getparam(ie,
5622                                           (struct drm_i915_getparam *)buf_temp,
5623                                           fd, arg);
5624     default:
5625         return -TARGET_ENOSYS;
5626     }
5627 }
5628 
5629 #endif
5630 
/*
 * TUNSETTXFILTER: the argument is a variable-length struct tun_filter
 * followed by filter->count ETH_ALEN-byte addresses.  The header and the
 * address array are copied from guest memory into buf_temp separately.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* Guard against overflowing the fixed-size buf_temp buffer. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5666 
/* Table of all translated ioctls, expanded from ioctls.h. */
IOCTLEntry ioctl_entries[] = {
/* Generic entry: converted automatically via the arg_type thunk list. */
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
/* Entry handled by a dedicated do_ioctl_* callback (dofn). */
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
/* Entry with host_cmd == 0: do_ioctl() returns -TARGET_ENOSYS for it. */
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* list terminator */
};
5677 
5678 /* ??? Implement proper locking for ioctls.  */
5679 /* do_ioctl() Must return target values and target errnos. */
5680 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5681 {
5682     const IOCTLEntry *ie;
5683     const argtype *arg_type;
5684     abi_long ret;
5685     uint8_t buf_temp[MAX_STRUCT_SIZE];
5686     int target_size;
5687     void *argptr;
5688 
5689     ie = ioctl_entries;
5690     for(;;) {
5691         if (ie->target_cmd == 0) {
5692             qemu_log_mask(
5693                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5694             return -TARGET_ENOSYS;
5695         }
5696         if (ie->target_cmd == cmd)
5697             break;
5698         ie++;
5699     }
5700     arg_type = ie->arg_type;
5701     if (ie->do_ioctl) {
5702         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5703     } else if (!ie->host_cmd) {
5704         /* Some architectures define BSD ioctls in their headers
5705            that are not implemented in Linux.  */
5706         return -TARGET_ENOSYS;
5707     }
5708 
5709     switch(arg_type[0]) {
5710     case TYPE_NULL:
5711         /* no argument */
5712         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5713         break;
5714     case TYPE_PTRVOID:
5715     case TYPE_INT:
5716     case TYPE_LONG:
5717     case TYPE_ULONG:
5718         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5719         break;
5720     case TYPE_PTR:
5721         arg_type++;
5722         target_size = thunk_type_size(arg_type, 0);
5723         switch(ie->access) {
5724         case IOC_R:
5725             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5726             if (!is_error(ret)) {
5727                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5728                 if (!argptr)
5729                     return -TARGET_EFAULT;
5730                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5731                 unlock_user(argptr, arg, target_size);
5732             }
5733             break;
5734         case IOC_W:
5735             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5736             if (!argptr)
5737                 return -TARGET_EFAULT;
5738             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5739             unlock_user(argptr, arg, 0);
5740             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5741             break;
5742         default:
5743         case IOC_RW:
5744             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5745             if (!argptr)
5746                 return -TARGET_EFAULT;
5747             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5748             unlock_user(argptr, arg, 0);
5749             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5750             if (!is_error(ret)) {
5751                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5752                 if (!argptr)
5753                     return -TARGET_EFAULT;
5754                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5755                 unlock_user(argptr, arg, target_size);
5756             }
5757             break;
5758         }
5759         break;
5760     default:
5761         qemu_log_mask(LOG_UNIMP,
5762                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5763                       (long)cmd, arg_type[0]);
5764         ret = -TARGET_ENOSYS;
5765         break;
5766     }
5767     return ret;
5768 }
5769 
5770 static const bitmask_transtbl iflag_tbl[] = {
5771         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5772         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5773         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5774         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5775         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5776         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5777         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5778         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5779         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5780         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5781         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5782         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5783         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5784         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5785         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5786         { 0, 0, 0, 0 }
5787 };
5788 
5789 static const bitmask_transtbl oflag_tbl[] = {
5790 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5791 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5792 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5793 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5794 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5795 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5796 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5797 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5798 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5799 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5800 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5801 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5802 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5803 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5804 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5805 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5806 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5807 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5808 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5809 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5810 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5811 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5812 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5813 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5814 	{ 0, 0, 0, 0 }
5815 };
5816 
5817 static const bitmask_transtbl cflag_tbl[] = {
5818 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5819 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5820 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5821 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5822 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5823 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5824 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5825 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5826 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5827 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5828 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5829 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5830 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5831 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5832 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5833 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5834 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5835 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5836 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5837 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5838 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5839 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5840 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5841 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5842 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5843 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5844 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5845 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5846 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5847 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5848 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5849 	{ 0, 0, 0, 0 }
5850 };
5851 
5852 static const bitmask_transtbl lflag_tbl[] = {
5853   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5854   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5855   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5856   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5857   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5858   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5859   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5860   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5861   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5862   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5863   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5864   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5865   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5866   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5867   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5868   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5869   { 0, 0, 0, 0 }
5870 };
5871 
5872 static void target_to_host_termios (void *dst, const void *src)
5873 {
5874     struct host_termios *host = dst;
5875     const struct target_termios *target = src;
5876 
5877     host->c_iflag =
5878         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5879     host->c_oflag =
5880         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5881     host->c_cflag =
5882         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5883     host->c_lflag =
5884         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5885     host->c_line = target->c_line;
5886 
5887     memset(host->c_cc, 0, sizeof(host->c_cc));
5888     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5889     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5890     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5891     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5892     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5893     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5894     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5895     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5896     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5897     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5898     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5899     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5900     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5901     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5902     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5903     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5904     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5905 }
5906 
5907 static void host_to_target_termios (void *dst, const void *src)
5908 {
5909     struct target_termios *target = dst;
5910     const struct host_termios *host = src;
5911 
5912     target->c_iflag =
5913         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5914     target->c_oflag =
5915         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5916     target->c_cflag =
5917         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5918     target->c_lflag =
5919         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5920     target->c_line = host->c_line;
5921 
5922     memset(target->c_cc, 0, sizeof(target->c_cc));
5923     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5924     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5925     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5926     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5927     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5928     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5929     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5930     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5931     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5932     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5933     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5934     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5935     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5936     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5937     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5938     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5939     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5940 }
5941 
/*
 * Thunk descriptor for struct termios: conversion is done by the custom
 * host_to_target_termios/target_to_host_termios functions rather than a
 * field list; print_termios provides the strace-style representation.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5948 
/* Target <-> host translation of mmap(2) flag bits. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5971 
5972 /*
5973  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5974  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5975  */
5976 #if defined(TARGET_I386)
5977 
5978 /* NOTE: there is really one LDT for all the threads */
5979 static uint8_t *ldt_table;
5980 
5981 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5982 {
5983     int size;
5984     void *p;
5985 
5986     if (!ldt_table)
5987         return 0;
5988     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5989     if (size > bytecount)
5990         size = bytecount;
5991     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5992     if (!p)
5993         return -TARGET_EFAULT;
5994     /* ??? Should this by byteswapped?  */
5995     memcpy(p, ldt_table, size);
5996     unlock_user(p, ptr, size);
5997     return size;
5998 }
5999 
/* XXX: add locking support */
/*
 * Install one descriptor in the guest LDT (modify_ldt func 1 or 0x11).
 * oldmode selects the legacy modify_ldt(1) semantics, which reject
 * "contents == 3" entries and ignore the "useable" bit.  The entry_1 /
 * entry_2 packing below mirrors the Linux kernel's descriptor layout.
 * Returns 0 on success or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Copy the guest user_desc out, fixing up target byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* 64-bit code segment bit, only meaningful for 64-bit guests. */
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the descriptor words exactly as the hardware expects. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6091 
6092 /* specific and weird i386 syscalls */
6093 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6094                               unsigned long bytecount)
6095 {
6096     abi_long ret;
6097 
6098     switch (func) {
6099     case 0:
6100         ret = read_ldt(ptr, bytecount);
6101         break;
6102     case 1:
6103         ret = write_ldt(env, ptr, bytecount, 1);
6104         break;
6105     case 0x11:
6106         ret = write_ldt(env, ptr, bytecount, 0);
6107         break;
6108     default:
6109         ret = -TARGET_ENOSYS;
6110         break;
6111     }
6112     return ret;
6113 }
6114 
6115 #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(2): install a TLS descriptor in the guest
 * GDT.  If entry_number is -1, the first free TLS slot is chosen and
 * the selected index is written back to the guest struct.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first unused TLS slot and report it to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flag bits (same layout as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two descriptor words in hardware layout. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6200 
/*
 * Emulate get_thread_area(2): decode the GDT descriptor for the TLS
 * slot named in the guest struct and write back base/limit/flags in
 * user_desc format.  Returns 0 or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Reverse of the bit packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6247 
/* arch_prctl(2) is only meaningful for 64-bit guests; 32-bit ABI gets ENOSYS. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6252 #else
6253 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6254 {
6255     abi_long ret = 0;
6256     abi_ulong val;
6257     int idx;
6258 
6259     switch(code) {
6260     case TARGET_ARCH_SET_GS:
6261     case TARGET_ARCH_SET_FS:
6262         if (code == TARGET_ARCH_SET_GS)
6263             idx = R_GS;
6264         else
6265             idx = R_FS;
6266         cpu_x86_load_seg(env, idx, 0);
6267         env->segs[idx].base = addr;
6268         break;
6269     case TARGET_ARCH_GET_GS:
6270     case TARGET_ARCH_GET_FS:
6271         if (code == TARGET_ARCH_GET_GS)
6272             idx = R_GS;
6273         else
6274             idx = R_FS;
6275         val = env->segs[idx].base;
6276         if (put_user(val, addr, abi_ulong))
6277             ret = -TARGET_EFAULT;
6278         break;
6279     default:
6280         ret = -TARGET_EINVAL;
6281         break;
6282     }
6283     return ret;
6284 }
6285 #endif /* defined(TARGET_ABI32 */
6286 
6287 #endif /* defined(TARGET_I386) */
6288 
6289 #define NEW_STACK_SIZE 0x40000
6290 
6291 
6292 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6293 typedef struct {
6294     CPUArchState *env;
6295     pthread_mutex_t mutex;
6296     pthread_cond_t cond;
6297     pthread_t thread;
6298     uint32_t tid;
6299     abi_ulong child_tidptr;
6300     abi_ulong parent_tidptr;
6301     sigset_t sigmask;
6302 } new_thread_info;
6303 
/*
 * Thread start routine for the CLONE_VM path of do_fork().  Registers
 * the new thread with RCU and TCG, publishes its TID, re-enables
 * signals, signals the parent that setup is done, then waits for the
 * parent to release clone_lock before entering the guest CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6337 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Implement clone(2)/fork(2)/vfork(2).  CLONE_VM requests become host
 * pthreads sharing this process; anything else is emulated with a
 * plain fork().  Returns the child TID/PID on success, -1 on pthread
 * failure, or a negative target errno for invalid flag combinations.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore this thread's signal mask; the child got a copy. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr)
;
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6482 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command number to the host's.  Lock
 * commands are mapped to the 64-bit host variants so large offsets
 * survive 32-bit guests.  Returns -TARGET_EINVAL for unknown commands.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6589 
/*
 * Expand to a switch over "type" that converts flock lock types;
 * the caller defines TRANSTBL_CONVERT to pick the direction.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6596 
6597 static int target_to_host_flock(int type)
6598 {
6599 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6600     FLOCK_TRANSTBL
6601 #undef  TRANSTBL_CONVERT
6602     return -TARGET_EINVAL;
6603 }
6604 
6605 static int host_to_target_flock(int type)
6606 {
6607 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6608     FLOCK_TRANSTBL
6609 #undef  TRANSTBL_CONVERT
6610     /* if we don't know how to convert the value coming
6611      * from the host we copy to the target field as-is
6612      */
6613     return type;
6614 }
6615 
6616 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6617                                             abi_ulong target_flock_addr)
6618 {
6619     struct target_flock *target_fl;
6620     int l_type;
6621 
6622     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6623         return -TARGET_EFAULT;
6624     }
6625 
6626     __get_user(l_type, &target_fl->l_type);
6627     l_type = target_to_host_flock(l_type);
6628     if (l_type < 0) {
6629         return l_type;
6630     }
6631     fl->l_type = l_type;
6632     __get_user(fl->l_whence, &target_fl->l_whence);
6633     __get_user(fl->l_start, &target_fl->l_start);
6634     __get_user(fl->l_len, &target_fl->l_len);
6635     __get_user(fl->l_pid, &target_fl->l_pid);
6636     unlock_user_struct(target_fl, target_flock_addr, 0);
6637     return 0;
6638 }
6639 
/*
 * Copy host *fl out to a target "struct flock" at guest address
 * target_flock_addr.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6659 
/* Signatures of the flock64 copy-in/copy-out helpers chosen by do_fcntl. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6662 
6663 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6664 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6665                                                    abi_ulong target_flock_addr)
6666 {
6667     struct target_oabi_flock64 *target_fl;
6668     int l_type;
6669 
6670     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6671         return -TARGET_EFAULT;
6672     }
6673 
6674     __get_user(l_type, &target_fl->l_type);
6675     l_type = target_to_host_flock(l_type);
6676     if (l_type < 0) {
6677         return l_type;
6678     }
6679     fl->l_type = l_type;
6680     __get_user(fl->l_whence, &target_fl->l_whence);
6681     __get_user(fl->l_start, &target_fl->l_start);
6682     __get_user(fl->l_len, &target_fl->l_len);
6683     __get_user(fl->l_pid, &target_fl->l_pid);
6684     unlock_user_struct(target_fl, target_flock_addr, 0);
6685     return 0;
6686 }
6687 
/*
 * ARM OABI variant of copy_to_user_flock64 (different struct layout).
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6707 #endif
6708 
6709 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6710                                               abi_ulong target_flock_addr)
6711 {
6712     struct target_flock64 *target_fl;
6713     int l_type;
6714 
6715     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6716         return -TARGET_EFAULT;
6717     }
6718 
6719     __get_user(l_type, &target_fl->l_type);
6720     l_type = target_to_host_flock(l_type);
6721     if (l_type < 0) {
6722         return l_type;
6723     }
6724     fl->l_type = l_type;
6725     __get_user(fl->l_whence, &target_fl->l_whence);
6726     __get_user(fl->l_start, &target_fl->l_start);
6727     __get_user(fl->l_len, &target_fl->l_len);
6728     __get_user(fl->l_pid, &target_fl->l_pid);
6729     unlock_user_struct(target_fl, target_flock_addr, 0);
6730     return 0;
6731 }
6732 
/*
 * Copy host *fl out to a target "struct flock64" at guest address
 * target_flock_addr.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6752 
/*
 * Emulate fcntl(2).  Translates the command and its argument (lock
 * structs, owner structs, flag bitmasks, signal numbers) between target
 * and host representations, then issues the host fcntl.  Returns the
 * (converted) host result or a negative target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* The kernel filled in the conflicting lock; copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate host O_* flag bits back to target values. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument: pass straight through. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6871 
6872 #ifdef USE_UID16
6873 
/* Narrow a 32-bit UID to 16 bits, clamping to the overflow UID 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
6881 
/* Narrow a 32-bit GID to 16 bits, clamping to the overflow GID 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
6889 
/* Widen a 16-bit UID: the 16-bit -1 ("no change") becomes full-width -1. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
6897 
/* Widen a 16-bit GID: the 16-bit -1 ("no change") becomes full-width -1. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Target IDs are 16 bits wide here: swap to/from target byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6911 
6912 #else /* !USE_UID16 */
/* UIDs/GIDs are full width on this target: all conversions are identity. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Target IDs are 32 bits wide: swap to/from target byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6935 
6936 #endif /* USE_UID16 */
6937 
6938 /* We must do direct syscalls for setting UID/GID, because we want to
6939  * implement the Linux system call semantics of "change only for this thread",
6940  * not the libc/POSIX semantics of "change for all threads in process".
6941  * (See http://ewontfix.com/17/ for more details.)
6942  * We use the 32-bit version of the syscalls if present; if it is not
6943  * then either the host architecture supports 32-bit UIDs natively with
6944  * the standard syscall, or the 16-bit UID is the best we can do.
6945  */
6946 #ifdef __NR_setuid32
6947 #define __NR_sys_setuid __NR_setuid32
6948 #else
6949 #define __NR_sys_setuid __NR_setuid
6950 #endif
6951 #ifdef __NR_setgid32
6952 #define __NR_sys_setgid __NR_setgid32
6953 #else
6954 #define __NR_sys_setgid __NR_setgid
6955 #endif
6956 #ifdef __NR_setresuid32
6957 #define __NR_sys_setresuid __NR_setresuid32
6958 #else
6959 #define __NR_sys_setresuid __NR_setresuid
6960 #endif
6961 #ifdef __NR_setresgid32
6962 #define __NR_sys_setresgid __NR_setresgid32
6963 #else
6964 #define __NR_sys_setresgid __NR_setresgid
6965 #endif
6966 
/* Direct syscall wrappers for per-thread credential changes (see above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6971 
/*
 * One-time initialisation of the syscall layer: register every thunk
 * struct description from syscall_types.h, then patch up ioctl request
 * numbers whose size field depends on the target ABI.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* An all-ones size placeholder only makes sense for
                 * pointer arguments whose size the thunk can compute. */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the thunk-computed size into the command number. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7016 
#ifdef TARGET_NR_truncate64
/*
 * Implement the guest's truncate64: the 64-bit length arrives as a pair
 * of registers, which some ABIs require to start on an even register,
 * shifting the pair one slot to the right.
 */
static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    abi_long word1 = arg2;
    abi_long word2 = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        /* Pair was padded to an aligned register: use the next slots. */
        word1 = arg3;
        word2 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(word1, word2)));
}
#endif
7030 
#ifdef TARGET_NR_ftruncate64
/*
 * Implement the guest's ftruncate64; see target_truncate64() for the
 * register-pair alignment handling of the 64-bit length.
 */
static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    abi_long word1 = arg2;
    abi_long word2 = arg3;

    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        /* Pair was padded to an aligned register: use the next slots. */
        word1 = arg3;
        word2 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(word1, word2)));
}
#endif
7044 
#if defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest target_itimerspec at target_addr into *host_its.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    abi_ulong interval_addr =
        target_addr + offsetof(struct target_itimerspec, it_interval);
    abi_ulong value_addr =
        target_addr + offsetof(struct target_itimerspec, it_value);

    if (target_to_host_timespec(&host_its->it_interval, interval_addr)) {
        return -TARGET_EFAULT;
    }
    if (target_to_host_timespec(&host_its->it_value, value_addr)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
7062 
#if defined(TARGET_NR_timer_settime64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * Read a guest target__kernel_itimerspec (64-bit time fields) at
 * target_addr into *host_its.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    abi_ulong interval_addr =
        target_addr + offsetof(struct target__kernel_itimerspec, it_interval);
    abi_ulong value_addr =
        target_addr + offsetof(struct target__kernel_itimerspec, it_value);

    if (target_to_host_timespec64(&host_its->it_interval, interval_addr)) {
        return -TARGET_EFAULT;
    }
    if (target_to_host_timespec64(&host_its->it_value, value_addr)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
7080 
#if ((defined(TARGET_NR_timerfd_gettime) || \
      defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Write *host_its out to a guest target_itimerspec at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    abi_ulong interval_addr =
        target_addr + offsetof(struct target_itimerspec, it_interval);
    abi_ulong value_addr =
        target_addr + offsetof(struct target_itimerspec, it_value);

    if (host_to_target_timespec(interval_addr, &host_its->it_interval)) {
        return -TARGET_EFAULT;
    }
    if (host_to_target_timespec(value_addr, &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
7098 
#if ((defined(TARGET_NR_timerfd_gettime64) || \
      defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
      defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * Write *host_its out to a guest target__kernel_itimerspec (64-bit time
 * fields) at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    abi_ulong interval_addr =
        target_addr + offsetof(struct target__kernel_itimerspec, it_interval);
    abi_ulong value_addr =
        target_addr + offsetof(struct target__kernel_itimerspec, it_value);

    if (host_to_target_timespec64(interval_addr, &host_its->it_interval)) {
        return -TARGET_EFAULT;
    }
    if (host_to_target_timespec64(value_addr, &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
#endif
7118 
7119 #if defined(TARGET_NR_adjtimex) || \
7120     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest target_timex at target_addr into *host_tx, field by
 * field (adjtimex/clock_adjtime argument conversion).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only lock: nothing to copy back to the guest. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7155 
/*
 * Copy *host_tx out to a guest target_timex at target_addr, field by
 * field (adjtimex/clock_adjtime result conversion).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* Copy the filled-in struct back to guest memory. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7190 #endif
7191 
7192 
7193 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest target__kernel_timex at target_addr into *host_tx
 * (clock_adjtime64 argument conversion).
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest struct.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    /* The 'time' member has 64-bit fields, so it is converted
     * separately before the rest of the struct is copied below. */
    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Read-only lock: nothing to copy back to the guest. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7232 
/*
 * Copy *host_tx out to a guest target__kernel_timex at target_addr
 * (clock_adjtime64 result conversion).
 * Returns 0 on success, -TARGET_EFAULT on an unwritable guest struct.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    /* The 'time' member has 64-bit fields, so it is converted
     * separately from the field-by-field copy below. */
   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* Copy the filled-in struct back to guest memory. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7271 #endif
7272 
7273 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7274 #define sigev_notify_thread_id _sigev_un._tid
7275 #endif
7276 
/*
 * Convert a guest target_sigevent at target_addr into *host_sevp
 * (used by timer_create and friends).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unreadable.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): unlocks with copyback=1 even though the lock was
     * VERIFY_READ; kept as-is — confirm whether 0 was intended. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7302 
#if defined(TARGET_NR_mlockall)
/*
 * Translate the guest's mlockall() MCL_* flag bits into the host's
 * encoding; unknown bits are dropped.
 */
static inline int target_to_host_mlockall_arg(int arg)
{
    int host_flags = 0;

    if (arg & TARGET_MCL_CURRENT) {
        host_flags |= MCL_CURRENT;
    }
    if (arg & TARGET_MCL_FUTURE) {
        host_flags |= MCL_FUTURE;
    }
#ifdef MCL_ONFAULT
    /* MCL_ONFAULT is only defined on newer hosts. */
    if (arg & TARGET_MCL_ONFAULT) {
        host_flags |= MCL_ONFAULT;
    }
#endif

    return host_flags;
}
#endif
7323 
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Write a host 'struct stat' out to guest memory in the layout the
 * target's stat64-family syscalls expect.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * written.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    /* 32-bit ARM EABI guests use their own stat64 layout. */
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps only when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
7400 
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result (held in *host_stx in host byte order) out to a
 * guest target_statx at target_addr, byteswapping each field.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * written.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so reserved/spare fields are not leaked to the guest. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
7441 
/*
 * Invoke the host futex syscall directly, selecting the variant whose
 * timespec layout matches the host 'struct timespec' passed in
 * (futex vs. futex_time64 on 32-bit hosts).
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only if the host defines no usable futex syscall. */
    g_assert_not_reached();
}
7466 
/*
 * Like do_sys_futex() but via the signal-safe safe_futex wrappers and
 * with the result converted to a target errno.  Unlike do_sys_futex(),
 * a host without any futex syscall yields -TARGET_ENOSYS here rather
 * than aborting.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7491 
7492 /* ??? Using host futex calls even when target atomic operations
7493    are not really atomic probably breaks things.  However implementing
7494    futexes locally would make futexes shared between multiple processes
7495    tricky.  However they're probably useless because guest atomic
7496    operations won't work either.  */
#if defined(TARGET_NR_futex)
/*
 * Emulate the guest futex syscall.  uaddr/uaddr2 are guest addresses
 * translated with g2h(); timeout is a guest pointer for the WAIT ops
 * and an opaque uint32_t for REQUEUE/WAKE_OP.  Returns the syscall
 * result or a negative target errno.
 */
static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
                    target_ulong timeout, target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /*
             * Fix: the conversion fails when the guest timeout pointer
             * is bad; report EFAULT (as the kernel and do_futex_time64()
             * do) instead of waiting with an uninitialized timespec.
             */
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr),
                             op, tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
    case FUTEX_FD:
        /* val is a plain count/fd flag; no byteswap needed. */
        return do_safe_futex(g2h(cpu, uaddr),
                             op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
7545 
#if defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex_time64 syscall: identical to do_futex() but
 * the WAIT timeout is a guest 64-bit timespec.  Returns the syscall
 * result or a negative target errno.
 */
static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
                           int val, target_ulong timeout,
                           target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts;
    int base_op;

    /* ??? We assume FUTEX_* constants are the same on both host
       and target.  */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        if (timeout) {
            pts = &ts;
            /* A bad guest timeout pointer is reported as EFAULT. */
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            pts = NULL;
        }
        return do_safe_futex(g2h(cpu, uaddr), op,
                             tswap32(val), pts, NULL, val3);
    case FUTEX_WAKE:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_FD:
        return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
    case FUTEX_REQUEUE:
    case FUTEX_CMP_REQUEUE:
    case FUTEX_WAKE_OP:
        /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
           TIMEOUT parameter is interpreted as a uint32_t by the kernel.
           But the prototype takes a `struct timespec *'; insert casts
           to satisfy the compiler.  We do not need to tswap TIMEOUT
           since it's not compared to guest memory.  */
        pts = (struct timespec *)(uintptr_t) timeout;
        return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
                             (base_op == FUTEX_CMP_REQUEUE
                              ? tswap32(val3) : val3));
    default:
        return -TARGET_ENOSYS;
    }
}
#endif
7595 
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2) for the guest: run the host syscall
 * into a temporary host file_handle, then copy it back byteswapping
 * only handle_bytes/handle_type (the payload is opaque).
 * Returns the host result as a target errno value, or -TARGET_EFAULT
 * on bad guest pointers.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes (the caller-supplied payload size) is the first
     * 32-bit field of the guest struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
7649 
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2) for the guest: duplicate the guest's
 * file_handle into host byte order (handle_bytes/handle_type only; the
 * payload is opaque) and invoke the host syscall with translated open
 * flags.  Returns an fd or a negative target errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first 32-bit field of the guest struct. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7683 
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Implement signalfd/signalfd4 for the guest: convert the guest signal
 * mask and flags to host encodings, call signalfd(), and register the
 * resulting fd so reads of its siginfo get translated back.
 * Returns the fd or a negative target errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only NONBLOCK and CLOEXEC are valid signalfd4 flags. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd must translate host siginfo to target. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
7714 
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int target_sig = host_to_target_signal(WTERMSIG(status));
        return (status & ~0x7f) | target_sig;
    }
    if (WIFSTOPPED(status)) {
        int target_sig = host_to_target_signal(WSTOPSIG(status));
        return (target_sig << 8) | (status & 0xff);
    }
    /* Normal exit and continued statuses need no translation. */
    return status;
}
7728 
7729 static int open_self_cmdline(void *cpu_env, int fd)
7730 {
7731     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7732     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7733     int i;
7734 
7735     for (i = 0; i < bprm->argc; i++) {
7736         size_t len = strlen(bprm->argv[i]) + 1;
7737 
7738         if (write(fd, bprm->argv[i], len) != len) {
7739             return -1;
7740         }
7741     }
7742 
7743     return 0;
7744 }
7745 
/*
 * Emulate /proc/self/maps: report the guest view of the address space,
 * translating host mappings (from read_self_maps()) into guest addresses
 * and guest page permissions.  Host-only mappings are omitted.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        /* Only host ranges that correspond to guest addresses are shown. */
        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to one past the last valid guest address. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip ranges the guest page table does not actually map. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack; otherwise report the host path. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            /* Guest permissions come from page_get_flags(), not the host. */
            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the path field starts at column 73. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
7807 
7808 static int open_self_stat(void *cpu_env, int fd)
7809 {
7810     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7811     TaskState *ts = cpu->opaque;
7812     g_autoptr(GString) buf = g_string_new(NULL);
7813     int i;
7814 
7815     for (i = 0; i < 44; i++) {
7816         if (i == 0) {
7817             /* pid */
7818             g_string_printf(buf, FMT_pid " ", getpid());
7819         } else if (i == 1) {
7820             /* app name */
7821             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7822             bin = bin ? bin + 1 : ts->bprm->argv[0];
7823             g_string_printf(buf, "(%.15s) ", bin);
7824         } else if (i == 3) {
7825             /* ppid */
7826             g_string_printf(buf, FMT_pid " ", getppid());
7827         } else if (i == 27) {
7828             /* stack bottom */
7829             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7830         } else {
7831             /* for the rest, there is MasterCard */
7832             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7833         }
7834 
7835         if (write(fd, buf->str, buf->len) != buf->len) {
7836             return -1;
7837         }
7838     }
7839 
7840     return 0;
7841 }
7842 
7843 static int open_self_auxv(void *cpu_env, int fd)
7844 {
7845     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7846     TaskState *ts = cpu->opaque;
7847     abi_ulong auxv = ts->info->saved_auxv;
7848     abi_ulong len = ts->info->auxv_len;
7849     char *ptr;
7850 
7851     /*
7852      * Auxiliary vector is stored in target process stack.
7853      * read in whole auxv vector and copy it to file
7854      */
7855     ptr = lock_user(VERIFY_READ, auxv, len, 0);
7856     if (ptr != NULL) {
7857         while (len > 0) {
7858             ssize_t r;
7859             r = write(fd, ptr, len);
7860             if (r <= 0) {
7861                 break;
7862             }
7863             len -= r;
7864             ptr += r;
7865         }
7866         lseek(fd, 0, SEEK_SET);
7867         unlock_user(ptr, auxv, len);
7868     }
7869 
7870     return 0;
7871 }
7872 
/*
 * Return 1 if 'filename' names the /proc entry 'entry' of the current
 * process, i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>";
 * return 0 for any other path (including other processes' entries).
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Numeric pid: it only counts if it is our own pid. */
        char myself[80];
        size_t mylen;

        snprintf(myself, sizeof(myself), "%d/", getpid());
        mylen = strlen(myself);
        if (strncmp(rest, myself, mylen) != 0) {
            return 0;
        }
        rest += mylen;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7896 
7897 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7898     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Exact-match comparator for absolute /proc paths in the fakes table. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7903 #endif
7904 
7905 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7906 static int open_net_route(void *cpu_env, int fd)
7907 {
7908     FILE *fp;
7909     char *line = NULL;
7910     size_t len = 0;
7911     ssize_t read;
7912 
7913     fp = fopen("/proc/net/route", "r");
7914     if (fp == NULL) {
7915         return -1;
7916     }
7917 
7918     /* read header */
7919 
7920     read = getline(&line, &len, fp);
7921     dprintf(fd, "%s", line);
7922 
7923     /* read routes */
7924 
7925     while ((read = getline(&line, &len, fp)) != -1) {
7926         char iface[16];
7927         uint32_t dest, gw, mask;
7928         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7929         int fields;
7930 
7931         fields = sscanf(line,
7932                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7933                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7934                         &mask, &mtu, &window, &irtt);
7935         if (fields != 11) {
7936             continue;
7937         }
7938         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7939                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7940                 metric, tswap32(mask), mtu, window, irtt);
7941     }
7942 
7943     free(line);
7944     fclose(fp);
7945 
7946     return 0;
7947 }
7948 #endif
7949 
7950 #if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC guests: a single fixed "sun4u" line. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
7956 #endif
7957 
7958 #if defined(TARGET_HPPA)
/*
 * Emulate /proc/cpuinfo for HPPA guests with the fixed description of
 * the emulated 9000/778/B160L machine.  Always returns 0.
 */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char *const cpuinfo_lines[] = {
        "cpu family\t: PA-RISC 1.1e\n",
        "cpu\t\t: PA7300LC (PCX-L2)\n",
        "capabilities\t: os32\n",
        "model\t\t: 9000/778/B160L\n",
        "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n",
    };
    size_t i;

    for (i = 0; i < sizeof(cpuinfo_lines) / sizeof(cpuinfo_lines[0]); i++) {
        dprintf(fd, "%s", cpuinfo_lines[i]);
    }
    return 0;
}
7968 #endif
7969 
7970 #if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k guests: report the QEMU model name. */
static int open_hardware(void *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
7976 #endif
7977 
/*
 * openat() with emulation of selected /proc files whose contents must
 * describe the guest rather than the host.  For an emulated entry the
 * contents are generated into an unlinked temporary file and that fd is
 * returned; everything else falls through to the real safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;   /* name (or absolute path) to match */
        int (*fill)(void *cpu_env, int fd);  /* generates the contents */
        int (*cmp)(const char *s1, const char *s2);  /* matcher to use */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe: reuse the exec fd if the loader left one open. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* unlink immediately: the open fd keeps the file alive */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            /* preserve fill()'s errno across the close() call */
            errno = e;
            return r;
        }
        /* rewind so the caller reads the generated contents from the start */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8043 
/* Timer IDs handed to the guest carry this tag in their upper 16 bits. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs that do not carry the magic tag. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    /* Strip the tag, leaving the raw table index. */
    timerid &= 0xffff;

    /* The index must refer to a valid slot in g_posix_timers[]. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
8064 
/*
 * Copy a guest CPU-affinity bitmask (an array of abi_ulong words at
 * 'target_addr', 'target_size' bytes long) into a host bitmask of
 * unsigned longs.  The copy is done bit by bit because host and guest
 * word sizes may differ.  Returns 0 on success or -TARGET_EFAULT if the
 * guest buffer cannot be locked.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Caller must provide a host buffer at least as large as the guest's. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    /* Start from an all-clear mask; only set bits are copied over. */
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                /* 'bit' is the global bit index; map it to a host word. */
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
8098 
/*
 * Inverse of target_to_host_cpu_mask(): copy a host CPU-affinity
 * bitmask of unsigned longs into a guest array of abi_ulong words at
 * 'target_addr', translating bit positions between the two word sizes.
 * Returns 0 on success or -TARGET_EFAULT if the guest buffer cannot be
 * locked for writing.
 */
static int host_to_target_cpu_mask(const unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    /* Caller must provide a host buffer at least as large as the guest's. */
    assert(host_size >= target_size);

    target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;
        abi_ulong val = 0;

        /* Gather the host bits that fall into this guest word. */
        for (j = 0; j < target_bits; j++, bit++) {
            if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
                val |= 1UL << j;
            }
        }
        __put_user(val, &target_mask[i]);
    }

    unlock_user(target_mask, target_addr, target_size);
    return 0;
}
8131 
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* glibc provides no wrapper for pivot_root(2); emit a raw-syscall stub. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
8135 
8136 /* This is an internal helper for do_syscall so that it is easier
8137  * to have a single return point, so that actions, such as logging
8138  * of syscall results, can be performed.
8139  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8140  */
8141 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8142                             abi_long arg2, abi_long arg3, abi_long arg4,
8143                             abi_long arg5, abi_long arg6, abi_long arg7,
8144                             abi_long arg8)
8145 {
8146     CPUState *cpu = env_cpu(cpu_env);
8147     abi_long ret;
8148 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8149     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8150     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8151     || defined(TARGET_NR_statx)
8152     struct stat st;
8153 #endif
8154 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8155     || defined(TARGET_NR_fstatfs)
8156     struct statfs stfs;
8157 #endif
8158     void *p;
8159 
8160     switch(num) {
8161     case TARGET_NR_exit:
8162         /* In old applications this may be used to implement _exit(2).
8163            However in threaded applications it is used for thread termination,
8164            and _exit_group is used for application termination.
8165            Do thread termination if we have more then one thread.  */
8166 
8167         if (block_signals()) {
8168             return -TARGET_ERESTARTSYS;
8169         }
8170 
8171         pthread_mutex_lock(&clone_lock);
8172 
8173         if (CPU_NEXT(first_cpu)) {
8174             TaskState *ts = cpu->opaque;
8175 
8176             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8177             object_unref(OBJECT(cpu));
8178             /*
8179              * At this point the CPU should be unrealized and removed
8180              * from cpu lists. We can clean-up the rest of the thread
8181              * data without the lock held.
8182              */
8183 
8184             pthread_mutex_unlock(&clone_lock);
8185 
8186             if (ts->child_tidptr) {
8187                 put_user_u32(0, ts->child_tidptr);
8188                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8189                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8190             }
8191             thread_cpu = NULL;
8192             g_free(ts);
8193             rcu_unregister_thread();
8194             pthread_exit(NULL);
8195         }
8196 
8197         pthread_mutex_unlock(&clone_lock);
8198         preexit_cleanup(cpu_env, arg1);
8199         _exit(arg1);
8200         return 0; /* avoid warning */
8201     case TARGET_NR_read:
8202         if (arg2 == 0 && arg3 == 0) {
8203             return get_errno(safe_read(arg1, 0, 0));
8204         } else {
8205             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8206                 return -TARGET_EFAULT;
8207             ret = get_errno(safe_read(arg1, p, arg3));
8208             if (ret >= 0 &&
8209                 fd_trans_host_to_target_data(arg1)) {
8210                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8211             }
8212             unlock_user(p, arg2, ret);
8213         }
8214         return ret;
8215     case TARGET_NR_write:
8216         if (arg2 == 0 && arg3 == 0) {
8217             return get_errno(safe_write(arg1, 0, 0));
8218         }
8219         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8220             return -TARGET_EFAULT;
8221         if (fd_trans_target_to_host_data(arg1)) {
8222             void *copy = g_malloc(arg3);
8223             memcpy(copy, p, arg3);
8224             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8225             if (ret >= 0) {
8226                 ret = get_errno(safe_write(arg1, copy, ret));
8227             }
8228             g_free(copy);
8229         } else {
8230             ret = get_errno(safe_write(arg1, p, arg3));
8231         }
8232         unlock_user(p, arg2, 0);
8233         return ret;
8234 
8235 #ifdef TARGET_NR_open
8236     case TARGET_NR_open:
8237         if (!(p = lock_user_string(arg1)))
8238             return -TARGET_EFAULT;
8239         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8240                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8241                                   arg3));
8242         fd_trans_unregister(ret);
8243         unlock_user(p, arg1, 0);
8244         return ret;
8245 #endif
8246     case TARGET_NR_openat:
8247         if (!(p = lock_user_string(arg2)))
8248             return -TARGET_EFAULT;
8249         ret = get_errno(do_openat(cpu_env, arg1, p,
8250                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8251                                   arg4));
8252         fd_trans_unregister(ret);
8253         unlock_user(p, arg2, 0);
8254         return ret;
8255 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8256     case TARGET_NR_name_to_handle_at:
8257         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8258         return ret;
8259 #endif
8260 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8261     case TARGET_NR_open_by_handle_at:
8262         ret = do_open_by_handle_at(arg1, arg2, arg3);
8263         fd_trans_unregister(ret);
8264         return ret;
8265 #endif
8266     case TARGET_NR_close:
8267         fd_trans_unregister(arg1);
8268         return get_errno(close(arg1));
8269 
8270     case TARGET_NR_brk:
8271         return do_brk(arg1);
8272 #ifdef TARGET_NR_fork
8273     case TARGET_NR_fork:
8274         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8275 #endif
8276 #ifdef TARGET_NR_waitpid
8277     case TARGET_NR_waitpid:
8278         {
8279             int status;
8280             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8281             if (!is_error(ret) && arg2 && ret
8282                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8283                 return -TARGET_EFAULT;
8284         }
8285         return ret;
8286 #endif
8287 #ifdef TARGET_NR_waitid
8288     case TARGET_NR_waitid:
8289         {
8290             siginfo_t info;
8291             info.si_pid = 0;
8292             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8293             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8294                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8295                     return -TARGET_EFAULT;
8296                 host_to_target_siginfo(p, &info);
8297                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8298             }
8299         }
8300         return ret;
8301 #endif
8302 #ifdef TARGET_NR_creat /* not on alpha */
8303     case TARGET_NR_creat:
8304         if (!(p = lock_user_string(arg1)))
8305             return -TARGET_EFAULT;
8306         ret = get_errno(creat(p, arg2));
8307         fd_trans_unregister(ret);
8308         unlock_user(p, arg1, 0);
8309         return ret;
8310 #endif
8311 #ifdef TARGET_NR_link
8312     case TARGET_NR_link:
8313         {
8314             void * p2;
8315             p = lock_user_string(arg1);
8316             p2 = lock_user_string(arg2);
8317             if (!p || !p2)
8318                 ret = -TARGET_EFAULT;
8319             else
8320                 ret = get_errno(link(p, p2));
8321             unlock_user(p2, arg2, 0);
8322             unlock_user(p, arg1, 0);
8323         }
8324         return ret;
8325 #endif
8326 #if defined(TARGET_NR_linkat)
8327     case TARGET_NR_linkat:
8328         {
8329             void * p2 = NULL;
8330             if (!arg2 || !arg4)
8331                 return -TARGET_EFAULT;
8332             p  = lock_user_string(arg2);
8333             p2 = lock_user_string(arg4);
8334             if (!p || !p2)
8335                 ret = -TARGET_EFAULT;
8336             else
8337                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8338             unlock_user(p, arg2, 0);
8339             unlock_user(p2, arg4, 0);
8340         }
8341         return ret;
8342 #endif
8343 #ifdef TARGET_NR_unlink
8344     case TARGET_NR_unlink:
8345         if (!(p = lock_user_string(arg1)))
8346             return -TARGET_EFAULT;
8347         ret = get_errno(unlink(p));
8348         unlock_user(p, arg1, 0);
8349         return ret;
8350 #endif
8351 #if defined(TARGET_NR_unlinkat)
8352     case TARGET_NR_unlinkat:
8353         if (!(p = lock_user_string(arg2)))
8354             return -TARGET_EFAULT;
8355         ret = get_errno(unlinkat(arg1, p, arg3));
8356         unlock_user(p, arg2, 0);
8357         return ret;
8358 #endif
8359     case TARGET_NR_execve:
8360         {
8361             char **argp, **envp;
8362             int argc, envc;
8363             abi_ulong gp;
8364             abi_ulong guest_argp;
8365             abi_ulong guest_envp;
8366             abi_ulong addr;
8367             char **q;
8368 
8369             argc = 0;
8370             guest_argp = arg2;
8371             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8372                 if (get_user_ual(addr, gp))
8373                     return -TARGET_EFAULT;
8374                 if (!addr)
8375                     break;
8376                 argc++;
8377             }
8378             envc = 0;
8379             guest_envp = arg3;
8380             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8381                 if (get_user_ual(addr, gp))
8382                     return -TARGET_EFAULT;
8383                 if (!addr)
8384                     break;
8385                 envc++;
8386             }
8387 
8388             argp = g_new0(char *, argc + 1);
8389             envp = g_new0(char *, envc + 1);
8390 
8391             for (gp = guest_argp, q = argp; gp;
8392                   gp += sizeof(abi_ulong), q++) {
8393                 if (get_user_ual(addr, gp))
8394                     goto execve_efault;
8395                 if (!addr)
8396                     break;
8397                 if (!(*q = lock_user_string(addr)))
8398                     goto execve_efault;
8399             }
8400             *q = NULL;
8401 
8402             for (gp = guest_envp, q = envp; gp;
8403                   gp += sizeof(abi_ulong), q++) {
8404                 if (get_user_ual(addr, gp))
8405                     goto execve_efault;
8406                 if (!addr)
8407                     break;
8408                 if (!(*q = lock_user_string(addr)))
8409                     goto execve_efault;
8410             }
8411             *q = NULL;
8412 
8413             if (!(p = lock_user_string(arg1)))
8414                 goto execve_efault;
8415             /* Although execve() is not an interruptible syscall it is
8416              * a special case where we must use the safe_syscall wrapper:
8417              * if we allow a signal to happen before we make the host
8418              * syscall then we will 'lose' it, because at the point of
8419              * execve the process leaves QEMU's control. So we use the
8420              * safe syscall wrapper to ensure that we either take the
8421              * signal as a guest signal, or else it does not happen
8422              * before the execve completes and makes it the other
8423              * program's problem.
8424              */
8425             ret = get_errno(safe_execve(p, argp, envp));
8426             unlock_user(p, arg1, 0);
8427 
8428             goto execve_end;
8429 
8430         execve_efault:
8431             ret = -TARGET_EFAULT;
8432 
8433         execve_end:
8434             for (gp = guest_argp, q = argp; *q;
8435                   gp += sizeof(abi_ulong), q++) {
8436                 if (get_user_ual(addr, gp)
8437                     || !addr)
8438                     break;
8439                 unlock_user(*q, addr, 0);
8440             }
8441             for (gp = guest_envp, q = envp; *q;
8442                   gp += sizeof(abi_ulong), q++) {
8443                 if (get_user_ual(addr, gp)
8444                     || !addr)
8445                     break;
8446                 unlock_user(*q, addr, 0);
8447             }
8448 
8449             g_free(argp);
8450             g_free(envp);
8451         }
8452         return ret;
8453     case TARGET_NR_chdir:
8454         if (!(p = lock_user_string(arg1)))
8455             return -TARGET_EFAULT;
8456         ret = get_errno(chdir(p));
8457         unlock_user(p, arg1, 0);
8458         return ret;
8459 #ifdef TARGET_NR_time
8460     case TARGET_NR_time:
8461         {
8462             time_t host_time;
8463             ret = get_errno(time(&host_time));
8464             if (!is_error(ret)
8465                 && arg1
8466                 && put_user_sal(host_time, arg1))
8467                 return -TARGET_EFAULT;
8468         }
8469         return ret;
8470 #endif
8471 #ifdef TARGET_NR_mknod
8472     case TARGET_NR_mknod:
8473         if (!(p = lock_user_string(arg1)))
8474             return -TARGET_EFAULT;
8475         ret = get_errno(mknod(p, arg2, arg3));
8476         unlock_user(p, arg1, 0);
8477         return ret;
8478 #endif
8479 #if defined(TARGET_NR_mknodat)
8480     case TARGET_NR_mknodat:
8481         if (!(p = lock_user_string(arg2)))
8482             return -TARGET_EFAULT;
8483         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8484         unlock_user(p, arg2, 0);
8485         return ret;
8486 #endif
8487 #ifdef TARGET_NR_chmod
8488     case TARGET_NR_chmod:
8489         if (!(p = lock_user_string(arg1)))
8490             return -TARGET_EFAULT;
8491         ret = get_errno(chmod(p, arg2));
8492         unlock_user(p, arg1, 0);
8493         return ret;
8494 #endif
8495 #ifdef TARGET_NR_lseek
8496     case TARGET_NR_lseek:
8497         return get_errno(lseek(arg1, arg2, arg3));
8498 #endif
8499 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8500     /* Alpha specific */
8501     case TARGET_NR_getxpid:
8502         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8503         return get_errno(getpid());
8504 #endif
8505 #ifdef TARGET_NR_getpid
8506     case TARGET_NR_getpid:
8507         return get_errno(getpid());
8508 #endif
8509     case TARGET_NR_mount:
8510         {
8511             /* need to look at the data field */
8512             void *p2, *p3;
8513 
8514             if (arg1) {
8515                 p = lock_user_string(arg1);
8516                 if (!p) {
8517                     return -TARGET_EFAULT;
8518                 }
8519             } else {
8520                 p = NULL;
8521             }
8522 
8523             p2 = lock_user_string(arg2);
8524             if (!p2) {
8525                 if (arg1) {
8526                     unlock_user(p, arg1, 0);
8527                 }
8528                 return -TARGET_EFAULT;
8529             }
8530 
8531             if (arg3) {
8532                 p3 = lock_user_string(arg3);
8533                 if (!p3) {
8534                     if (arg1) {
8535                         unlock_user(p, arg1, 0);
8536                     }
8537                     unlock_user(p2, arg2, 0);
8538                     return -TARGET_EFAULT;
8539                 }
8540             } else {
8541                 p3 = NULL;
8542             }
8543 
8544             /* FIXME - arg5 should be locked, but it isn't clear how to
8545              * do that since it's not guaranteed to be a NULL-terminated
8546              * string.
8547              */
8548             if (!arg5) {
8549                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8550             } else {
8551                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8552             }
8553             ret = get_errno(ret);
8554 
8555             if (arg1) {
8556                 unlock_user(p, arg1, 0);
8557             }
8558             unlock_user(p2, arg2, 0);
8559             if (arg3) {
8560                 unlock_user(p3, arg3, 0);
8561             }
8562         }
8563         return ret;
8564 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8565 #if defined(TARGET_NR_umount)
8566     case TARGET_NR_umount:
8567 #endif
8568 #if defined(TARGET_NR_oldumount)
8569     case TARGET_NR_oldumount:
8570 #endif
8571         if (!(p = lock_user_string(arg1)))
8572             return -TARGET_EFAULT;
8573         ret = get_errno(umount(p));
8574         unlock_user(p, arg1, 0);
8575         return ret;
8576 #endif
8577 #ifdef TARGET_NR_stime /* not on alpha */
8578     case TARGET_NR_stime:
8579         {
8580             struct timespec ts;
8581             ts.tv_nsec = 0;
8582             if (get_user_sal(ts.tv_sec, arg1)) {
8583                 return -TARGET_EFAULT;
8584             }
8585             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8586         }
8587 #endif
8588 #ifdef TARGET_NR_alarm /* not on alpha */
8589     case TARGET_NR_alarm:
8590         return alarm(arg1);
8591 #endif
8592 #ifdef TARGET_NR_pause /* not on alpha */
8593     case TARGET_NR_pause:
8594         if (!block_signals()) {
8595             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8596         }
8597         return -TARGET_EINTR;
8598 #endif
8599 #ifdef TARGET_NR_utime
8600     case TARGET_NR_utime:
8601         {
8602             struct utimbuf tbuf, *host_tbuf;
8603             struct target_utimbuf *target_tbuf;
8604             if (arg2) {
8605                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
8606                     return -TARGET_EFAULT;
8607                 tbuf.actime = tswapal(target_tbuf->actime);
8608                 tbuf.modtime = tswapal(target_tbuf->modtime);
8609                 unlock_user_struct(target_tbuf, arg2, 0);
8610                 host_tbuf = &tbuf;
8611             } else {
8612                 host_tbuf = NULL;
8613             }
8614             if (!(p = lock_user_string(arg1)))
8615                 return -TARGET_EFAULT;
8616             ret = get_errno(utime(p, host_tbuf));
8617             unlock_user(p, arg1, 0);
8618         }
8619         return ret;
8620 #endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            /*
             * utimes(2): copy the two-element target_timeval array (if
             * present) into host struct timeval[2]; a NULL arg2 passes
             * a NULL times pointer through to the host call.
             */
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            /*
             * futimesat(2): like utimes but relative to dirfd arg1.
             * The timeval pair comes from arg3; the pathname (arg2) is
             * run through path() to apply QEMU's sysroot redirection.
             */
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        /* access(2): check permissions (arg2 = mode) on the redirected path. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        /*
         * faccessat(2): arg1 = dirfd, arg3 = mode; the flags argument is
         * passed as 0 (the raw faccessat syscall takes no flags).
         */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        /* nice(2): adjust process priority by arg1. */
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) never fails and returns void. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        /* syncfs(2): flush the filesystem containing fd arg1. */
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* kill(2): translate the guest signal number to the host's first. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            /*
             * rename(2): lock both pathnames before checking either, so
             * the unlock calls below are unconditional (unlock_user is
             * safe on a NULL host pointer).
             */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* renameat(2): arg1/arg3 are the old/new dirfds. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /*
             * renameat2(2): as renameat with flags in arg5, forwarded
             * via the sys_renameat2() wrapper.
             */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* mkdir(2): arg2 is the mode. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* mkdirat(2): arg1 = dirfd, arg3 = mode. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        /* rmdir(2): remove the directory named by the guest string. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        /*
         * dup(2): on success, propagate any fd translation state from
         * the old fd to the new one (fd_trans_dup).
         */
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        /* pipe(2): no flags; last arg 0 selects the one-arg ABI handling. */
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2(2): translate guest open-flag bits to host values first. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            /*
             * times(2): fill the guest's target_tms buffer (when arg1 is
             * non-NULL) with byte-swapped, target-clock_t-converted
             * values, and convert the syscall's clock_t return as well.
             */
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /*
         * acct(2): a zero guest pointer means "disable accounting" and
         * is passed as NULL; otherwise the pathname is locked and
         * redirected through path().
         */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        /* umount2(2): arg2 carries the flags (e.g. MNT_FORCE). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* ioctl(2): full request/argument translation lives in do_ioctl(). */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        /* fcntl(2): command and flag translation lives in do_fcntl(). */
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        /* chroot(2): change the root directory to the guest pathname. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        /* dup2(2): copy fd translation state onto the target fd on success. */
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /*
         * dup3(2): the only valid flag is O_CLOEXEC; reject anything
         * else before translating the flag bits to host values.
         */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /*
             * Old-style sigaction(2).  MIPS uses the new sigaction layout
             * (with a sig[] array mask) even for this syscall number, so
             * it gets its own conversion path; all other targets use
             * target_old_sigaction with a single-word mask.  The actual
             * registration is done by do_sigaction().
             */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                /* Write the previous action back to the guest (arg3). */
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel rejects any sigsetsize other than the native one. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    /* Mark the old-action struct dirty so it is copied back. */
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /*
             * sgetmask(2): return the current blocked-signal mask as an
             * old-style (single word) target sigset.
             */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /*
             * ssetmask(2): install arg1 as the new blocked mask and
             * return the previous mask in old-style form.
             */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /*
             * Old-style sigprocmask(2).  Alpha passes the mask by value
             * in arg2 and returns the old mask in the syscall result
             * (with v0 forced to 0 to signal success); other targets
             * pass guest pointers to old-style sigsets.
             */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL new-set pointer: query only; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Copy the previous mask back to the guest (arg3). */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /*
             * rt_sigprocmask(2): like sigprocmask but with full-size
             * sigsets; arg4 must equal the target sigset size exactly.
             */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL new-set pointer: query only; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Copy the previous mask back to the guest (arg3). */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /*
             * Old-style sigpending(2): write the pending set to the
             * guest in old (single word) sigset format.
             */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /*
             * Old-style sigsuspend(2): store the converted mask in the
             * TaskState so signal delivery code can restore the prior
             * mask, then wait via safe_rt_sigsuspend().  Alpha passes
             * the mask by value in arg1 instead of by pointer.
             */
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            /* rt_sigsuspend(2): full-size sigset; arg2 must match exactly. */
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /*
             * rt_sigtimedwait(2): wait for a signal in the given set,
             * optionally bounded by a timespec (arg3).  On success the
             * siginfo is converted back to target layout (arg2) and the
             * returned signal number is translated to the target's.
             */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /*
             * Same as rt_sigtimedwait above, but the guest timeout uses
             * the 64-bit timespec layout (target_to_host_timespec64).
             */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /*
             * rt_sigqueueinfo(2): convert the guest siginfo (arg3) to
             * host layout and queue it to pid arg1 with signal arg2.
             */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Thread-directed variant: tgid arg1, tid arg2, sig arg3. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Restart if a signal arrived while entering; else restore frame. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* sethostname(2): arg2 is the length of the guest string. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            /* setrlimit(2): convert resource id and limit values first. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /*
             * getrlimit(2): fetch the host limit for the translated
             * resource and write it back in target rlimit format.
             */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            /* getrusage(2): conversion to target layout is in the helper. */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /*
             * gettimeofday(2): both the timeval (arg1) and timezone
             * (arg2) outputs are optional; copy back only those the
             * guest asked for.
             */
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /*
             * settimeofday(2): either pointer may be NULL; only the
             * provided halves are converted and passed to the host.
             */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* old_select: all five arguments packed in a struct at arg1. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* pselect6(2) with 32-bit timespec layout. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* pselect6(2) with 64-bit timespec layout. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            /*
             * symlink(2): lock both strings up front; unlock_user is a
             * no-op on a NULL host pointer so the cleanup is unconditional.
             */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            /* symlinkat(2): arg2 is the dirfd for the link path (arg3). */
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    /* readlink(pathname, buf, bufsiz): intercepts /proc/self/exe so the
     * guest sees the path of the emulated binary rather than QEMU's own.
     * Per readlink(2), the result is not NUL-terminated and the return
     * value is the number of bytes placed in the buffer. */
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            /* ret doubles as the copy-back length; negative on error means
             * nothing is written back to the guest buffer. */
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    /*
     * readlinkat(dirfd, pathname, buf, bufsiz): arg1 = dirfd, arg2 = guest
     * pathname, arg3 = guest buffer, arg4 = bufsiz.  Mirrors the
     * TARGET_NR_readlink case above, including the /proc/self/exe magic so
     * the guest sees the emulated binary's path.
     *
     * Fixes vs. the previous version of this case:
     *  - do not snprintf() from 'real' when realpath() failed (it was read
     *    uninitialized on the error path);
     *  - clamp the byte count to bufsiz and do not NUL-terminate, matching
     *    readlink(2) semantics and the readlink case above;
     *  - reject a zero-sized buffer with EINVAL before the exe check.
     */
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg4);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    /* reboot(magic, magic2, cmd, arg): only RESTART2 takes a string arg. */
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        /* Old-style mmap: these targets pass a single guest pointer (arg1)
         * to an in-memory array of the six actual arguments. */
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2(2): the offset argument is in units of 2^MMAP_SHIFT
         * (4096-byte pages by default), hence the shift. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* Memory syscalls accept tagged addresses; strip the tag first. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Extend the range down to the stack limit and drop the
                 * flag, since target_mprotect does not handle GROWSDOWN. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        /* The flags argument is hard-coded to 0: fchmodat(2) flags are not
         * forwarded here. */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    /* statfs/fstatfs share the struct-conversion code via the
     * convert_statfs label; fstatfs jumps into the middle of this case. */
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    /* 64-bit variants: note the target buffer is arg3 here, not arg2. */
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    /* Socket-family syscalls: all argument marshalling (sockaddr
     * conversion, iovec locking, etc.) lives in the do_* helpers. */
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with flags = 0. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* ret (bytes produced) is also the copy-back length. */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    /*
     * syslog(type, bufp, len): arg1 = action type, arg2 = guest buffer,
     * arg3 = length (or the console level for
     * SYSLOG_ACTION_CONSOLE_LEVEL, which the kernel also passes in len).
     *
     * Fix: the length sanity checks below previously tested arg2 (the
     * buffer pointer) even though the length actually passed to
     * sys_syslog() and lock_user() was arg3; use arg3 consistently.
     */
    case TARGET_NR_syslog:
        {
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* No buffer needed; len may still carry the console level. */
                return get_errno(sys_syslog((int)arg1, NULL, len));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    /* setitimer(which, new_value, old_value): guest itimerval is a pair of
     * target_timeval structs converted field-by-field. */
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                /* NULL new_value is allowed and passed through. */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Copy the previous timer value back only on success. */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    /* stat/lstat/fstat share the host-to-target struct stat conversion via
     * the do_stat label inside the fstat case below. */
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding/unset fields are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when both host and target have them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter do_syscall with the number in arg1. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    /* wait4(pid, status, options, rusage): status and rusage are written
     * back to the guest only on success. */
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (no child changed state) writes no status. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    /* sysinfo(info): host struct sysinfo converted field-by-field. */
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    /* SysV IPC: multiplexed ipc() on some targets, individual syscalls on
     * others; conversion work lives in the do_* helpers. */
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() with no timeout. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* time64 variant: the final flag selects 64-bit timespec decoding. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5))
;
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run gdb/TSAN/etc. exit hooks before the whole process goes away. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    /* vm86 only exists for 32-bit x86 guests. */
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    /* adjtimex(buf): struct timex is converted in, and back out on
     * success, since the kernel updates it in place. */
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* phtx always points at htx here, so the '&& phtx' test is
             * redundant but harmless. */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    /* _llseek(fd, offset_high, offset_low, result, whence): the 64-bit
     * result is stored through the guest pointer in arg4. */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host lacks llseek: emulate with a plain 64-bit lseek. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        /*
         * Three emulation strategies, selected at build time:
         *  1) host getdents with an intermediate buffer (32-bit target
         *     on a 64-bit host, so record layouts differ),
         *  2) host getdents converting records in place (layouts match
         *     apart from byte order),
         *  3) host getdents64 with in-place conversion to the target's
         *     legacy dirent format.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            /* Bounce buffer for the host-format records; sized by the
             * guest's count, so target records (never larger) will fit. */
            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
		struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
		int count1, tnamelen;

		count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
		tde = target_dirp;
                /* Walk the host records, rewriting each one in target
                 * layout/byte order into the guest buffer. */
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* d_name length includes the kernel's padding and
                     * trailing NUL, so it can be copied verbatim. */
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
		    count1 += treclen;
                }
		ret = count1;   /* report bytes written in *target* format */
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                /* Record layout matches the target: only byte-swap the
                 * fixed fields of each record in place. */
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2: trailing NUL plus the type byte stashed in the
                     * record's final byte (see below). */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /*
         * linux_dirent64 has the same layout on host and target, so
         * read directly into the guest buffer and byte-swap each
         * record's fixed-width fields in place.
         */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        /* do_ppoll flags: (..., ppoll, ppoll_time64) */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        /* Vectored read: translate the guest iovec array, then pass it
         * to the host syscall; the '1' on unlock copies data back. */
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports its failure cause via errno */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        /* Positioned vectored read; the file offset is split across
         * arg4/arg5 and recombined for the host ABI. */
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        /* Pass-through: pid in, session id (or errno) out. */
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Translate the guest cpu mask into host layout before use. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        /* Either output pointer may be NULL; only non-NULL guest
         * pointers get the corresponding value copied back. */
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* The kernel rejects a NULL param pointer with EINVAL, so
             * match that rather than faulting on the copy-in. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        /* Result timespec is converted to the target's (32-bit) layout. */
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        /* Same as above but writing a 64-bit time target timespec. */
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        /*
         * nanosleep(req, rem): sleep for *req; if interrupted and rem
         * is non-NULL, report the unslept time there.
         *
         * Fix: the return values of the timespec conversion helpers
         * were previously ignored, so an unreadable req pointer or an
         * unwritable rem pointer was silently swallowed instead of
         * failing with EFAULT as the kernel does.
         */
        {
            struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
    case TARGET_NR_prctl:
        /* Dispatch on the prctl option: options with pointer arguments
         * need translation; everything else falls through to the host
         * prctl at the end of this inner switch. */
        switch (arg1) {
        case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
            /* NB: the signal number is passed through untranslated;
             * host-to-target signal mapping is not applied here. */
            if (!is_error(ret) && arg2
                && put_user_s32(deathsig, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
#ifdef PR_GET_NAME
        case PR_GET_NAME:
        {
            /* Task names are a fixed 16 bytes (TASK_COMM_LEN). */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
        case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(arg1, (unsigned long)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
#endif
#ifdef TARGET_MIPS
        case TARGET_PR_GET_FP_MODE:
        /* Report the emulated CPU's FP mode bits (FR/FRE) directly
         * from CP0 state; never reaches the host kernel. */
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            ret = 0;
            if (env->CP0_Status & (1 << CP0St_FR)) {
                ret |= TARGET_PR_FP_MODE_FR;
            }
            if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
                ret |= TARGET_PR_FP_MODE_FRE;
            }
            return ret;
        }
        case TARGET_PR_SET_FP_MODE:
        {
            CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
            bool old_fr = env->CP0_Status & (1 << CP0St_FR);
            bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
            bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
            bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;

            const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
                                            TARGET_PR_FP_MODE_FRE;

            /* If nothing to change, return right away, successfully.  */
            if (old_fr == new_fr && old_fre == new_fre) {
                return 0;
            }
            /* Check the value is valid */
            if (arg2 & ~known_bits) {
                return -TARGET_EOPNOTSUPP;
            }
            /* Setting FRE without FR is not supported.  */
            if (new_fre && !new_fr) {
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
                /* FR1 is not supported */
                return -TARGET_EOPNOTSUPP;
            }
            if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
                && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
                /* cannot set FR=0 */
                return -TARGET_EOPNOTSUPP;
            }
            if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
                /* Cannot set FRE=1 */
                return -TARGET_EOPNOTSUPP;
            }

            /* Repack the even/odd register pairs to match the new FR
             * register-width mode before flipping the mode bits. */
            int i;
            fpr_t *fpr = env->active_fpu.fpr;
            for (i = 0; i < 32 ; i += 2) {
                if (!old_fr && new_fr) {
                    fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
                } else if (old_fr && !new_fr) {
                    fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
                }
            }

            if (new_fr) {
                env->CP0_Status |= (1 << CP0St_FR);
                env->hflags |= MIPS_HFLAG_F64;
            } else {
                env->CP0_Status &= ~(1 << CP0St_FR);
                env->hflags &= ~MIPS_HFLAG_F64;
            }
            if (new_fre) {
                env->CP0_Config5 |= (1 << CP0C5_FRE);
                if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
                    env->hflags |= MIPS_HFLAG_FRE;
                }
            } else {
                env->CP0_Config5 &= ~(1 << CP0C5_FRE);
                env->hflags &= ~MIPS_HFLAG_FRE;
            }

            return 0;
        }
#endif /* MIPS */
#ifdef TARGET_AARCH64
        case TARGET_PR_SVE_SET_VL:
            /*
             * We cannot support either PR_SVE_SET_VL_ONEXEC or
             * PR_SVE_VL_INHERIT.  Note the kernel definition
             * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
             * even though the current architectural maximum is VQ=16.
             */
            ret = -TARGET_EINVAL;
            if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
                && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);
                uint32_t vq, old_vq;

                /* ZCR_EL1 stores VQ-1 in its low 4 bits. */
                old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
                vq = MAX(arg2 / 16, 1);
                vq = MIN(vq, cpu->sve_max_vq);

                /* Shrinking the VL must discard the now-inaccessible
                 * high parts of the Z/P registers. */
                if (vq < old_vq) {
                    aarch64_sve_narrow_vq(env, vq);
                }
                env->vfp.zcr_el[1] = vq - 1;
                arm_rebuild_hflags(env);
                ret = vq * 16;
            }
            return ret;
        case TARGET_PR_SVE_GET_VL:
            ret = -TARGET_EINVAL;
            {
                ARMCPU *cpu = env_archcpu(cpu_env);
                if (cpu_isar_feature(aa64_sve, cpu)) {
                    ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
                }
            }
            return ret;
        case TARGET_PR_PAC_RESET_KEYS:
            /* Re-randomize the selected pointer-authentication keys;
             * arg2 == 0 means "all keys", mirroring the kernel API. */
            {
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (cpu_isar_feature(aa64_pauth, cpu)) {
                    int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
                               TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
                               TARGET_PR_PAC_APGAKEY);
                    int ret = 0;
                    Error *err = NULL;

                    if (arg2 == 0) {
                        arg2 = all;
                    } else if (arg2 & ~all) {
                        return -TARGET_EINVAL;
                    }
                    if (arg2 & TARGET_PR_PAC_APIAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apia,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APIBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apib,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apda,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APDBKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apdb,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (arg2 & TARGET_PR_PAC_APGAKEY) {
                        ret |= qemu_guest_getrandom(&env->keys.apga,
                                                    sizeof(ARMPACKey), &err);
                    }
                    if (ret != 0) {
                        /*
                         * Some unknown failure in the crypto.  The best
                         * we can do is log it and fail the syscall.
                         * The real syscall cannot fail this way.
                         */
                        qemu_log_mask(LOG_UNIMP,
                                      "PR_PAC_RESET_KEYS: Crypto failure: %s",
                                      error_get_pretty(err));
                        error_free(err);
                        return -TARGET_EIO;
                    }
                    return 0;
                }
            }
            return -TARGET_EINVAL;
10832         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10833             {
10834                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10835                 CPUARMState *env = cpu_env;
10836                 ARMCPU *cpu = env_archcpu(env);
10837 
10838                 if (cpu_isar_feature(aa64_mte, cpu)) {
10839                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10840                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10841                 }
10842 
10843                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10844                     return -TARGET_EINVAL;
10845                 }
10846                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10847 
10848                 if (cpu_isar_feature(aa64_mte, cpu)) {
10849                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10850                     case TARGET_PR_MTE_TCF_NONE:
10851                     case TARGET_PR_MTE_TCF_SYNC:
10852                     case TARGET_PR_MTE_TCF_ASYNC:
10853                         break;
10854                     default:
10855                         return -EINVAL;
10856                     }
10857 
10858                     /*
10859                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10860                      * Note that the syscall values are consistent with hw.
10861                      */
10862                     env->cp15.sctlr_el[1] =
10863                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10864                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10865 
10866                     /*
10867                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10868                      * Note that the syscall uses an include mask,
10869                      * and hardware uses an exclude mask -- invert.
10870                      */
10871                     env->cp15.gcr_el1 =
10872                         deposit64(env->cp15.gcr_el1, 0, 16,
10873                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10874                     arm_rebuild_hflags(env);
10875                 }
10876                 return 0;
10877             }
        case TARGET_PR_GET_TAGGED_ADDR_CTRL:
            /* Reconstruct the PR_SET_TAGGED_ADDR_CTRL value from the
             * emulated system registers (inverse of the setter). */
            {
                abi_long ret = 0;
                CPUARMState *env = cpu_env;
                ARMCPU *cpu = env_archcpu(env);

                if (arg2 || arg3 || arg4 || arg5) {
                    return -TARGET_EINVAL;
                }
                if (env->tagged_addr_enable) {
                    ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
                }
                if (cpu_isar_feature(aa64_mte, cpu)) {
                    /* See above. */
                    ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
                            << TARGET_PR_MTE_TCF_SHIFT);
                    ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
                                    ~env->cp15.gcr_el1);
                }
                return ret;
            }
#endif /* AARCH64 */
        case PR_GET_SECCOMP:
        case PR_SET_SECCOMP:
            /* Disable seccomp to prevent the target disabling syscalls we
             * need. */
            return -TARGET_EINVAL;
        default:
            /* Most prctl options have no pointer arguments */
            return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
        }
        break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit values in aligned register pairs, which
         * inserts a padding register; shift the args down if so. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        /* target_offset64 recombines the split hi/lo offset words. */
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        /* Fill the guest buffer (arg1, size arg2) with the cwd path. */
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    /* capget/capset: translate the capability header and (for v2+) the
     * pair of data structs between guest and host byte order.  A zero
     * data pointer (arg2 == 0) is valid and just probes/updates the
     * header, so dataptr stays NULL in that case.
     */
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* VERIFY_WRITE: the kernel writes the negotiated version back
         * into the header even on capset. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* Copy the fetched capability sets back to the guest. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    /* sendfile: the offset argument is an abi_long in the guest, read
     * and written back with the sign-extending get/put_user_sal
     * helpers.  A NULL offset pointer (arg3 == 0) means the host call
     * uses and advances the file offset of the input fd.
     */
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Propagate the updated offset back to the guest. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    /* sendfile64: same as above but the guest offset is always 64-bit. */
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork with the vfork clone flags. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    /* ugetrlimit: fetch a host rlimit and convert the resource id and
     * both limit values into the guest's representation.
     */
    case TARGET_NR_ugetrlimit:
    {
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    /* truncate64: the 64-bit length may arrive split across registers;
     * target_truncate64 reassembles it per target ABI.
     */
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    /* stat64 family: run the host stat variant into the shared 'st'
     * buffer, then marshal it into the guest's 64-bit stat layout.
     * path() applies the -L sysroot prefix to guest pathnames.
     */
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat64/newfstatat: same handler, args are (dirfd, path,
         * statbuf, flags). */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    /* statx: use the host statx syscall when available; if the host
     * kernel lacks it (ENOSYS), fall back to fstatat and synthesize a
     * target_statx from the ordinary stat result.
     */
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only fall through to the fstatat emulation when the
                 * host kernel rejected statx with ENOSYS. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Fields stat cannot provide (mask, attributes,
                 * sub-second timestamps, btime) are left zero. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    /* 16-bit uid/gid syscall variants: low2high* widen the guest's
     * legacy 16-bit ids to host ids, high2low* narrow results back.
     */
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11244     case TARGET_NR_getgroups:
11245         {
11246             int gidsetsize = arg1;
11247             target_id *target_grouplist;
11248             gid_t *grouplist;
11249             int i;
11250 
11251             grouplist = alloca(gidsetsize * sizeof(gid_t));
11252             ret = get_errno(getgroups(gidsetsize, grouplist));
11253             if (gidsetsize == 0)
11254                 return ret;
11255             if (!is_error(ret)) {
11256                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11257                 if (!target_grouplist)
11258                     return -TARGET_EFAULT;
11259                 for(i = 0;i < ret; i++)
11260                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11261                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11262             }
11263         }
11264         return ret;
11265     case TARGET_NR_setgroups:
11266         {
11267             int gidsetsize = arg1;
11268             target_id *target_grouplist;
11269             gid_t *grouplist = NULL;
11270             int i;
11271             if (gidsetsize) {
11272                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11273                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11274                 if (!target_grouplist) {
11275                     return -TARGET_EFAULT;
11276                 }
11277                 for (i = 0; i < gidsetsize; i++) {
11278                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11279                 }
11280                 unlock_user(target_grouplist, arg2, 0);
11281             }
11282             return get_errno(setgroups(gidsetsize, grouplist));
11283         }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    /* getresuid: write the real/effective/saved uids to the three
     * guest pointers, narrowed to the target id width. */
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    /* Set real, effective and saved gids, widening legacy 16-bit ids.
     * Guard on TARGET_NR_setresgid itself (it was previously keyed on
     * TARGET_NR_getresgid, so targets defining only one of the pair
     * would drop or fail to compile this case).
     */
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    /* getresgid: write the real/effective/saved gids to the three
     * guest pointers, narrowed to the target id width. */
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    /* setfsuid/setfsgid never fail; the previous id is returned. */
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
11351 
#ifdef TARGET_NR_lchown32
    /* 32-bit uid/gid syscall variants: ids pass through unmodified,
     * no 16-bit narrowing/widening. */
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    /* getxuid returns the real uid as the syscall result and stashes
     * the effective uid in the a4 register. */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    /* getxgid likewise: real gid in the result, effective gid in a4. */
    case TARGET_NR_getxgid:
         {
            uid_t egid;
            egid=getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    /* Only the IEEE FP control query is supported; anything else
     * falls through with EOPNOTSUPP. */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* Refresh the SWCR status bits from the live FPCR. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    /* Supports setting the IEEE FP control word and raising software
     * FP exceptions; other selectors return EOPNOTSUPP. */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Keep the dynamic rounding mode, replace the rest of
                 * the FPCR from the guest-supplied SWCR. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    /* New, trap-enabled exceptions: deliver SIGFPE with
                     * the code of the highest-priority matching bit. */
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    /* Unlike POSIX sigprocmask, the old mask is returned as the
     * syscall result rather than through a pointer. */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11541 
#ifdef TARGET_NR_getgid32
    /* 32-bit id variants: host ids pass straight through. */
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    /* Return the supplementary group list as 32-bit guest gids. */
    case TARGET_NR_getgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Validate the guest-supplied size before sizing the stack
             * allocation with it; the kernel returns EINVAL for
             * negative or over-large sizes.
             */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0) {
                /* Size 0 just queries the number of groups. */
                return ret;
            }
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    /* Install the supplementary group list from 32-bit guest gids. */
    case TARGET_NR_setgroups32:
        {
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Reject negative or over-large sizes before the alloca;
             * the kernel returns EINVAL for them too.
             */
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    /* getresuid32: store real/effective/saved uids as 32-bit values. */
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    /* getresgid32: store real/effective/saved gids as 32-bit values. */
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): the result vector (arg3) is locked with
             * lock_user_string even though mincore() writes one byte
             * per page into it — looks like it should be a
             * VERIFY_WRITE lock_user of (arg2 / page size) bytes;
             * confirm against current upstream. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        /* posix_fadvise returns the error directly, not via errno. */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
11699 
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    /* 32-bit ABIs pass the 64-bit offset/len as register pairs, with
     * per-target quirks in argument order and pair alignment. */
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate args into the generic fd, offset, len, advice order;
         * 'ret' is just a scratch slot here. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise returns the error code directly. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice numbering from the generic ABI. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
11762 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.  */
        /* All advice values are therefore silently accepted. */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    /* fcntl64: 64-bit file locks on 32-bit guests.  The flock64 struct
     * is converted with pluggable copy helpers because ARM OABI lays
     * it out differently from EABI. */
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            /* Everything else is the same as plain fcntl. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABI: the 64-bit offset arrives as a register pair,
         * shifted up by one slot on pair-aligning targets. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /* Extended-attribute syscalls: lock the guest name/value buffers,
     * invoke the corresponding host syscall, then unlock.  A zero guest
     * pointer is passed through as NULL, which the kernel accepts for
     * size-probing (list/get with NULL buffer returns the needed size).
     */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        /* Write back up to arg3 bytes of the returned name list. */
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                /* Attribute value is read-only input of arg4 bytes. */
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                /* Value buffer is written by the host syscall. */
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    /* set_thread_area: install the TLS pointer in whatever per-arch
     * location the target expects; only a few targets implement it. */
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low byte of the TLS value to be zero. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* M68K has no TLS register; the value lives in the TaskState. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; guests fall back to uname()'s domainname field. */
        return -TARGET_ENOSYS;
#endif
12047 
#ifdef TARGET_NR_clock_settime
    /* clock_{set,get}time and their *64 variants differ only in the
     * width of the guest timespec they convert. */
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy-out failure overwrites ret with -TARGET_EFAULT. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    /* clock_getres: fetch the clock's resolution.  The kernel accepts a
     * NULL resolution pointer (it then only validates the clockid), and
     * a faulting pointer must produce EFAULT — the previous code ignored
     * the copy-out result, silently dropping that error. */
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    /* Same as TARGET_NR_clock_getres but with a 64-bit guest timespec. */
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec64(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the "remaining" output when arg4 is supplied. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    /* Same as TARGET_NR_clock_nanosleep with 64-bit guest timespecs. */
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12157 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* The kernel only writes through this pointer at thread exit, so
         * a directly translated guest->host pointer can be handed over. */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        /* Signal numbers must be translated to the host's numbering. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12187 
#if defined(TARGET_NR_utimensat)
    /* utimensat: arg3 points at a pair of guest timespecs (atime, mtime);
     * a zero pointer means "set both to the current time". */
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* NULL pathname operates on the fd itself (dirfd semantics). */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    /* Same as TARGET_NR_utimensat with 64-bit guest timespecs. */
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    /* Futex emulation lives in its own helper; see do_futex(). */
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so reads of inotify events get
             * converted to the target's struct layout/byte order. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* Flags (IN_NONBLOCK/IN_CLOEXEC) share the fcntl flag mapping. */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    /* inotify_add_watch: translate the guest pathname and pass through.
     * lock_user_string() can fail on a bad guest pointer; that must be
     * reported as EFAULT rather than dereferencing NULL via path(). */
    case TARGET_NR_inotify_add_watch:
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Both arguments are plain integers; no translation needed. */
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif
12283 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                /* Optional attributes (maxmsg/msgsize) from the guest. */
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the name is locked at arg1 - 1 but unlocked at
             * arg1 -- this off-by-one looks suspicious; verify against the
             * target ABI's mq name-passing convention. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 offset as mq_open above -- verify. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
12316 
#ifdef TARGET_NR_mq_timedsend
    /* mq_timedsend: send the guest buffer (arg2, len arg3) with priority
     * arg4; arg5 is an optional absolute timeout.  The message buffer
     * lock was previously unchecked (NULL deref on a bad guest pointer)
     * and was leaked on the EFAULT early-return paths. */
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    /* Same as TARGET_NR_mq_timedsend with a 64-bit guest timespec; adds
     * the previously missing lock_user NULL check and unlocks the
     * message buffer on the EFAULT early-return paths. */
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12359 
#ifdef TARGET_NR_mq_timedreceive
    /* mq_timedreceive: receive a message into the guest buffer (arg2,
     * capacity arg3); the priority is stored through arg4 if non-NULL.
     * Fixes: the buffer is written to, so lock it VERIFY_WRITE (was
     * VERIFY_READ); check the lock result; unlock on EFAULT paths; only
     * store prio on success (the kernel does the same, and prio is
     * uninitialized on failure); report a faulting arg4 as EFAULT. */
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            /* Copy back only the bytes actually received. */
            unlock_user(p, arg2, is_error(ret) ? 0 : ret);
            if (!is_error(ret) && arg4 != 0 &&
                put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    /* Same as TARGET_NR_mq_timedreceive with a 64-bit guest timespec;
     * carries the same fixes (VERIFY_WRITE lock, NULL check, unlock on
     * EFAULT paths, prio stored only on success with its fault checked). */
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, is_error(ret) ? 0 : ret);
            if (!is_error(ret) && arg4 != 0 &&
                put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12413 
12414     /* Not implemented for now... */
12415 /*     case TARGET_NR_mq_notify: */
12416 /*         break; */
12417 
12418     case TARGET_NR_mq_getsetattr:
12419         {
12420             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12421             ret = 0;
12422             if (arg2 != 0) {
12423                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12424                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12425                                            &posix_mq_attr_out));
12426             } else if (arg3 != 0) {
12427                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12428             }
12429             if (ret == 0 && arg3 != 0) {
12430                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12431             }
12432         }
12433         return ret;
12434 #endif
12435 
12436 #ifdef CONFIG_SPLICE
12437 #ifdef TARGET_NR_tee
12438     case TARGET_NR_tee:
12439         {
12440             ret = get_errno(tee(arg1,arg2,arg3,arg4));
12441         }
12442         return ret;
12443 #endif
#ifdef TARGET_NR_splice
    /* splice: the two optional guest offset pointers are copied in,
     * passed to the host, and copied back (the host call updates them). */
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* Translate the guest iovec array before handing it over. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec failed and left the cause in errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
12488 #endif /* CONFIG_SPLICE */
12489 #ifdef CONFIG_EVENTFD
12490 #if defined(TARGET_NR_eventfd)
12491     case TARGET_NR_eventfd:
12492         ret = get_errno(eventfd(arg1, 0));
12493         if (ret >= 0) {
12494             fd_trans_register(ret, &target_eventfd_trans);
12495         }
12496         return ret;
12497 #endif
12498 #if defined(TARGET_NR_eventfd2)
12499     case TARGET_NR_eventfd2:
12500     {
12501         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12502         if (arg2 & TARGET_O_NONBLOCK) {
12503             host_flags |= O_NONBLOCK;
12504         }
12505         if (arg2 & TARGET_O_CLOEXEC) {
12506             host_flags |= O_CLOEXEC;
12507         }
12508         ret = get_errno(eventfd(arg1, host_flags));
12509         if (ret >= 0) {
12510             fd_trans_register(ret, &target_eventfd_trans);
12511         }
12512         return ret;
12513     }
12514 #endif
12515 #endif /* CONFIG_EVENTFD  */
12516 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12517     case TARGET_NR_fallocate:
12518 #if TARGET_ABI_BITS == 32
12519         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12520                                   target_offset64(arg5, arg6)));
12521 #else
12522         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12523 #endif
12524         return ret;
12525 #endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* MIPS o32 inserts an alignment pad, shifting the pairs along. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old signalfd has no flags argument; share the signalfd4 path. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                /* Convert the guest epoll_event to the host layout. */
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12607 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents before multiplying it into a buffer size. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-side scratch array; events are converted out afterwards. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                /* The sigset size must match the target's exactly. */
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* Convert only the events actually returned (ret of them). */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = NULL;
        int resource = target_to_host_resource(arg2);

        /* A new limit is deliberately not passed through for AS/DATA/STACK:
         * applying those to the host process could interfere with QEMU's
         * own management of the guest address space.
         * NOTE(review): assumption inferred from the resource filter below;
         * confirm against the other rlimit handling in this file.
         */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp,
                                      arg4 ? &rold : NULL));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the host name directly into the guest buffer (arg1, arg2). */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Emulated compare-and-exchange: if the u32 at guest address arg6
         * equals arg2, store arg1 there; the old memory value is always
         * returned.  Not atomic with respect to other vCPUs -- should use
         * start_exclusive from main.c.
         */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Fault reading guest memory: deliver SIGSEGV and bail out
             * immediately so the uninitialized mem_value is never used.
             * (The original fell through here, reading an uninitialized
             * variable; 0xdeadbeef was the evidently intended result.)
             */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* The kernel's implementation (and QEMU's ARM barrier handling)
         * treats this as a no-op, so simply report success. */
        return 0;
#endif
12765 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* All host timer slots are in use. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest an encoded timer id.  If that write
                 * faults, tear the host timer down again and release the
                 * slot (same release idiom as TARGET_NR_timer_delete) so
                 * neither the timer nor the slot is leaked.
                 */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    g_posix_timers[timer_index] = 0;
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12801 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec *old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            return timerid;
        }
        if (arg3 == 0) {
            return -TARGET_EINVAL;
        }

        timer_t host_timer = g_posix_timers[timerid];
        struct itimerspec new_spec = {{0},}, old_spec = {{0},};

        if (target_to_host_itimerspec(&new_spec, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(timer_settime(host_timer, arg2, &new_spec, &old_spec));
        /* Copy the previous setting back if the guest asked for it. */
        if (arg4 && host_to_target_itimerspec(arg4, &old_spec)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12829 
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* 64-bit-time variant of timer_settime. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            return timerid;
        }
        if (arg3 == 0) {
            return -TARGET_EINVAL;
        }

        timer_t host_timer = g_posix_timers[timerid];
        struct itimerspec new_spec = {{0},}, old_spec = {{0},};

        if (target_to_host_itimerspec64(&new_spec, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(timer_settime(host_timer, arg2, &new_spec, &old_spec));
        /* Copy the previous setting back if the guest asked for it. */
        if (arg4 && host_to_target_itimerspec64(arg4, &old_spec)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12855 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* hspec is only valid on success; previously the uninitialized
             * struct was copied to guest memory even when timer_gettime
             * failed.
             */
            if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12878 
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* hspec is only valid on success; don't copy an uninitialized
             * struct to guest memory on the error path.
             */
            if (!is_error(ret) && host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12901 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            return timerid;
        }
        return get_errno(timer_getoverrun(g_posix_timers[timerid]));
    }
#endif
12917 
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            return timerid;
        }
        ret = get_errno(timer_delete(g_posix_timers[timerid]));
        /* Release the slot (unconditionally, as before) so timer_create
         * can reuse it. */
        g_posix_timers[timerid] = 0;
        return ret;
    }
#endif
12934 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
    {
        /* Translate the guest open-style flags before calling the host. */
        int host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);

        return get_errno(timerfd_create(arg1, host_flags));
    }
#endif
12940 
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            /* args: int fd, struct itimerspec *curr_value */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* its_curr is only filled in on success; don't copy an
             * uninitialized struct out to the guest on failure.
             */
            if (!is_error(ret) && arg2 &&
                host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12954 
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit-time variant of timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* its_curr is only filled in on success; don't copy an
             * uninitialized struct out to the guest on failure.
             */
            if (!is_error(ret) && arg2 &&
                host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12968 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            /* args: int fd, int flags, const struct itimerspec *new_value,
             * struct itimerspec *old_value.  A NULL new_value is passed
             * through so the host reports EINVAL/EFAULT as appropriate.
             */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* its_old is only defined on success; don't copy an
             * uninitialized struct out to the guest on failure.
             */
            if (!is_error(ret) && arg4 &&
                host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12991 
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit-time variant of timerfd_settime. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* its_old is only defined on success; don't copy an
             * uninitialized struct out to the guest on failure.
             */
            if (!is_error(ret) && arg4 &&
                host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13014 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* I/O scheduling class/priority: (which, who) need no translation. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* (which, who, ioprio) passed straight through to the host. */
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* Reassociate the calling thread with the namespace fd in arg1. */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* arg1 is the CLONE_* flags bitmask, passed through unchanged. */
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        /* Compare kernel resources of two processes; all args pass through. */
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* arg1 is a guest pointer to the memfd name string. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Presumably clears any stale fd-translation entry left over from a
         * recycled descriptor number -- NOTE(review): confirm against
         * fd_trans_unregister's definition. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        /* (cmd, flags) pass through; memory barriers act on the host CPUs. */
        return get_errno(membarrier(arg1, arg2));
#endif
13057 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            /* args: fd_in, off_in*, fd_out, off_out*, len, flags.
             * A NULL offset pointer means "use and update the fd's own file
             * position", so each offset is copied in/out only when supplied.
             */
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            if (!is_error(ret) && ret > 0) {
                /* The host call advanced the supplied offsets; write the
                 * updated values back to guest memory. */
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13094 
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            /* Both arguments are guest pointers to path strings. */
            void *p2;

            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            ret = (p && p2) ? get_errno(pivot_root(p, p2)) : -TARGET_EFAULT;
            /* Unlock both unconditionally, exactly as before. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
13111 
    default:
        /* Syscall number not implemented by this emulation layer. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
13115     }
13116     return ret;
13117 }
13118 
13119 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13120                     abi_long arg2, abi_long arg3, abi_long arg4,
13121                     abi_long arg5, abi_long arg6, abi_long arg7,
13122                     abi_long arg8)
13123 {
13124     CPUState *cpu = env_cpu(cpu_env);
13125     abi_long ret;
13126 
13127 #ifdef DEBUG_ERESTARTSYS
13128     /* Debug-only code for exercising the syscall-restart code paths
13129      * in the per-architecture cpu main loops: restart every syscall
13130      * the guest makes once before letting it through.
13131      */
13132     {
13133         static bool flag;
13134         flag = !flag;
13135         if (flag) {
13136             return -TARGET_ERESTARTSYS;
13137         }
13138     }
13139 #endif
13140 
13141     record_syscall_start(cpu, num, arg1,
13142                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13143 
13144     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13145         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13146     }
13147 
13148     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13149                       arg5, arg6, arg7, arg8);
13150 
13151     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13152         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13153                           arg3, arg4, arg5, arg6);
13154     }
13155 
13156     record_syscall_return(cpu, num, ret);
13157     return ret;
13158 }
13159