xref: /openbmc/qemu/linux-user/syscall.c (revision 62fffaa6)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "qemu/guest-random.h"
131 #include "qemu/selfmap.h"
132 #include "user/syscall-trace.h"
133 #include "qapi/error.h"
134 #include "fd-trans.h"
135 #include "tcg/tcg.h"
136 
137 #ifndef CLONE_IO
138 #define CLONE_IO                0x80000000      /* Clone io context */
139 #endif
140 
141 /* We can't directly call the host clone syscall, because this will
142  * badly confuse libc (breaking mutexes, for example). So we must
143  * divide clone flags into:
144  *  * flag combinations that look like pthread_create()
145  *  * flag combinations that look like fork()
146  *  * flags we can implement within QEMU itself
147  *  * flags we can't support and will return an error for
148  */
149 /* For thread creation, all these flags must be present; for
150  * fork, none must be present.
151  */
152 #define CLONE_THREAD_FLAGS                              \
153     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
154      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
155 
156 /* These flags are ignored:
157  * CLONE_DETACHED is now ignored by the kernel;
158  * CLONE_IO is just an optimisation hint to the I/O scheduler
159  */
160 #define CLONE_IGNORED_FLAGS                     \
161     (CLONE_DETACHED | CLONE_IO)
162 
163 /* Flags for fork which we can implement within QEMU itself */
164 #define CLONE_OPTIONAL_FORK_FLAGS               \
165     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
166      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
167 
168 /* Flags for thread creation which we can implement within QEMU itself */
169 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
170     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
171      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
172 
173 #define CLONE_INVALID_FORK_FLAGS                                        \
174     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
175 
176 #define CLONE_INVALID_THREAD_FLAGS                                      \
177     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
178        CLONE_IGNORED_FLAGS))
179 
180 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
181  * have almost all been allocated. We cannot support any of
182  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
183  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
184  * The checks against the invalid thread masks above will catch these.
185  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
186  */
187 
188 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
189  * once. This exercises the codepaths for restart.
190  */
191 //#define DEBUG_ERESTARTSYS
192 
193 //#include <linux/msdos_fs.h>
194 #define	VFAT_IOCTL_READDIR_BOTH		_IOR('r', 1, struct linux_dirent [2])
195 #define	VFAT_IOCTL_READDIR_SHORT	_IOR('r', 2, struct linux_dirent [2])
196 
197 #undef _syscall0
198 #undef _syscall1
199 #undef _syscall2
200 #undef _syscall3
201 #undef _syscall4
202 #undef _syscall5
203 #undef _syscall6
204 
205 #define _syscall0(type,name)		\
206 static type name (void)			\
207 {					\
208 	return syscall(__NR_##name);	\
209 }
210 
211 #define _syscall1(type,name,type1,arg1)		\
212 static type name (type1 arg1)			\
213 {						\
214 	return syscall(__NR_##name, arg1);	\
215 }
216 
217 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
218 static type name (type1 arg1,type2 arg2)		\
219 {							\
220 	return syscall(__NR_##name, arg1, arg2);	\
221 }
222 
223 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
224 static type name (type1 arg1,type2 arg2,type3 arg3)		\
225 {								\
226 	return syscall(__NR_##name, arg1, arg2, arg3);		\
227 }
228 
229 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
230 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
231 {										\
232 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
233 }
234 
235 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
236 		  type5,arg5)							\
237 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
238 {										\
239 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
240 }
241 
242 
243 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
244 		  type5,arg5,type6,arg6)					\
245 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
246                   type6 arg6)							\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
249 }
250 
251 
252 #define __NR_sys_uname __NR_uname
253 #define __NR_sys_getcwd1 __NR_getcwd
254 #define __NR_sys_getdents __NR_getdents
255 #define __NR_sys_getdents64 __NR_getdents64
256 #define __NR_sys_getpriority __NR_getpriority
257 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
258 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
259 #define __NR_sys_syslog __NR_syslog
260 #if defined(__NR_futex)
261 # define __NR_sys_futex __NR_futex
262 #endif
263 #if defined(__NR_futex_time64)
264 # define __NR_sys_futex_time64 __NR_futex_time64
265 #endif
266 #define __NR_sys_inotify_init __NR_inotify_init
267 #define __NR_sys_inotify_add_watch __NR_inotify_add_watch
268 #define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch
269 #define __NR_sys_statx __NR_statx
270 
271 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
272 #define __NR__llseek __NR_lseek
273 #endif
274 
275 /* Newer kernel ports have llseek() instead of _llseek() */
276 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
277 #define TARGET_NR__llseek TARGET_NR_llseek
278 #endif
279 
280 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
281 #ifndef TARGET_O_NONBLOCK_MASK
282 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
283 #endif
284 
285 #define __NR_sys_gettid __NR_gettid
286 _syscall0(int, sys_gettid)
287 
288 /* For the 64-bit guest on 32-bit host case we must emulate
289  * getdents using getdents64, because otherwise the host
290  * might hand us back more dirent records than we can fit
291  * into the guest buffer after structure format conversion.
292  * Otherwise we emulate getdents with getdents if the host has it.
293  */
294 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
295 #define EMULATE_GETDENTS_WITH_GETDENTS
296 #endif
297 
298 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
299 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
300 #endif
301 #if (defined(TARGET_NR_getdents) && \
302       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
303     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
304 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
305 #endif
306 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
307 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
308           loff_t *, res, uint, wh);
309 #endif
310 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
311 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
312           siginfo_t *, uinfo)
313 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
314 #ifdef __NR_exit_group
315 _syscall1(int,exit_group,int,error_code)
316 #endif
317 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
318 _syscall1(int,set_tid_address,int *,tidptr)
319 #endif
320 #if defined(__NR_futex)
321 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
322           const struct timespec *,timeout,int *,uaddr2,int,val3)
323 #endif
324 #if defined(__NR_futex_time64)
325 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
326           const struct timespec *,timeout,int *,uaddr2,int,val3)
327 #endif
328 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
329 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
330           unsigned long *, user_mask_ptr);
331 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
332 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_getcpu __NR_getcpu
335 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
336 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
337           void *, arg);
338 _syscall2(int, capget, struct __user_cap_header_struct *, header,
339           struct __user_cap_data_struct *, data);
340 _syscall2(int, capset, struct __user_cap_header_struct *, header,
341           struct __user_cap_data_struct *, data);
342 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
343 _syscall2(int, ioprio_get, int, which, int, who)
344 #endif
345 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
346 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
347 #endif
348 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
349 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
350 #endif
351 
352 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
353 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
354           unsigned long, idx1, unsigned long, idx2)
355 #endif
356 
357 /*
358  * It is assumed that struct statx is architecture independent.
359  */
360 #if defined(TARGET_NR_statx) && defined(__NR_statx)
361 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
362           unsigned int, mask, struct target_statx *, statxbuf)
363 #endif
364 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
365 _syscall2(int, membarrier, int, cmd, int, flags)
366 #endif
367 
/*
 * Translation table for open(2)/fcntl(2) file status flags between
 * target and host encodings.  Each row is
 *   { target_mask, target_bits, host_mask, host_bits }
 * presumably consumed by the generic bitmask translation helpers
 * (TODO confirm against fd/flag conversion callers).
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
403 
404 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
405 
406 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
407 #if defined(__NR_utimensat)
408 #define __NR_sys_utimensat __NR_utimensat
409 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
410           const struct timespec *,tsp,int,flags)
411 #else
/*
 * Fallback for hosts whose kernel headers lack __NR_utimensat:
 * report the syscall as unimplemented, exactly as the kernel would.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
418 #endif
419 #endif /* TARGET_NR_utimensat */
420 
421 #ifdef TARGET_NR_renameat2
422 #if defined(__NR_renameat2)
423 #define __NR_sys_renameat2 __NR_renameat2
424 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
425           const char *, new, unsigned int, flags)
426 #else
/*
 * Fallback for hosts without the renameat2 syscall.  A plain renameat()
 * is equivalent when no flags are requested; any flag bits are
 * unsupported and fail with ENOSYS, as the kernel would.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
436 #endif
437 #endif /* TARGET_NR_renameat2 */
438 
439 #ifdef CONFIG_INOTIFY
440 #include <sys/inotify.h>
441 
442 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
/* Thin wrapper so the syscall dispatcher has a uniform sys_* entry. */
static int sys_inotify_init(void)
{
    return inotify_init();
}
447 #endif
448 #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
/* Forward directly to the host's inotify_add_watch(). */
static int sys_inotify_add_watch(int fd, const char *pathname, int32_t mask)
{
    return inotify_add_watch(fd, pathname, mask);
}
453 #endif
454 #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
/* Forward directly to the host's inotify_rm_watch(). */
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return inotify_rm_watch(fd, wd);
}
459 #endif
460 #ifdef CONFIG_INOTIFY1
461 #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
/* Forward directly to the host's inotify_init1(). */
static int sys_inotify_init1(int flags)
{
    return inotify_init1(flags);
}
466 #endif
467 #endif
468 #else
469 /* Userspace can usually survive runtime without inotify */
470 #undef TARGET_NR_inotify_init
471 #undef TARGET_NR_inotify_init1
472 #undef TARGET_NR_inotify_add_watch
473 #undef TARGET_NR_inotify_rm_watch
474 #endif /* CONFIG_INOTIFY  */
475 
476 #if defined(TARGET_NR_prlimit64)
477 #ifndef __NR_prlimit64
478 # define __NR_prlimit64 -1
479 #endif
480 #define __NR_sys_prlimit64 __NR_prlimit64
481 /* The glibc rlimit structure may not be that used by the underlying syscall */
482 struct host_rlimit64 {
483     uint64_t rlim_cur;
484     uint64_t rlim_max;
485 };
486 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
487           const struct host_rlimit64 *, new_limit,
488           struct host_rlimit64 *, old_limit)
489 #endif
490 
491 
492 #if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
/* Slot table for host timer_t handles; 0 means the slot is free and
 * the sentinel (timer_t)1 marks a slot as claimed-but-not-yet-created. */
static timer_t g_posix_timers[32] = { 0, } ;

/*
 * Claim the first free slot in g_posix_timers and return its index,
 * or -1 if all slots are in use.  The caller is expected to overwrite
 * the sentinel with the real timer handle once timer_create succeeds.
 *
 * NOTE(review): the test-and-set below is not atomic; two guest
 * threads racing through timer_create could claim the same slot —
 * the FIXME is real and should be confirmed/fixed with proper locking.
 */
static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
508 #endif
509 
/*
 * Translate a host errno value into the target's numbering.
 * errnos.c.inc expands E(X) into one case per listed errno; any value
 * without a case falls through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
520 
/*
 * Inverse of host_to_target_errno(): map a target errno back to the
 * host's numbering, passing unlisted values through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
531 
532 static inline abi_long get_errno(abi_long ret)
533 {
534     if (ret == -1)
535         return -host_to_target_errno(errno);
536     else
537         return ret;
538 }
539 
540 const char *target_strerror(int err)
541 {
542     if (err == TARGET_ERESTARTSYS) {
543         return "To be restarted";
544     }
545     if (err == TARGET_QEMU_ESIGRETURN) {
546         return "Successful exit from sigreturn";
547     }
548 
549     return strerror(target_to_host_errno(err));
550 }
551 
552 #define safe_syscall0(type, name) \
553 static type safe_##name(void) \
554 { \
555     return safe_syscall(__NR_##name); \
556 }
557 
558 #define safe_syscall1(type, name, type1, arg1) \
559 static type safe_##name(type1 arg1) \
560 { \
561     return safe_syscall(__NR_##name, arg1); \
562 }
563 
564 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
565 static type safe_##name(type1 arg1, type2 arg2) \
566 { \
567     return safe_syscall(__NR_##name, arg1, arg2); \
568 }
569 
570 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
571 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
572 { \
573     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
574 }
575 
576 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
577     type4, arg4) \
578 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
579 { \
580     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
581 }
582 
583 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
584     type4, arg4, type5, arg5) \
585 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
586     type5 arg5) \
587 { \
588     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
589 }
590 
591 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
592     type4, arg4, type5, arg5, type6, arg6) \
593 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
594     type5 arg5, type6 arg6) \
595 { \
596     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
597 }
598 
599 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
600 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
601 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
602               int, flags, mode_t, mode)
603 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
604 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
605               struct rusage *, rusage)
606 #endif
607 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
608               int, options, struct rusage *, rusage)
609 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
610 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
611     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
612 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
613               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
614 #endif
615 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
616 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
617               struct timespec *, tsp, const sigset_t *, sigmask,
618               size_t, sigsetsize)
619 #endif
620 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
621               int, maxevents, int, timeout, const sigset_t *, sigmask,
622               size_t, sigsetsize)
623 #if defined(__NR_futex)
624 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
625               const struct timespec *,timeout,int *,uaddr2,int,val3)
626 #endif
627 #if defined(__NR_futex_time64)
628 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
629               const struct timespec *,timeout,int *,uaddr2,int,val3)
630 #endif
631 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
632 safe_syscall2(int, kill, pid_t, pid, int, sig)
633 safe_syscall2(int, tkill, int, tid, int, sig)
634 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
635 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
636 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
637 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
638               unsigned long, pos_l, unsigned long, pos_h)
639 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
640               unsigned long, pos_l, unsigned long, pos_h)
641 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
642               socklen_t, addrlen)
643 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
644               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
645 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
646               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
647 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
648 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
649 safe_syscall2(int, flock, int, fd, int, operation)
650 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
651 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
652               const struct timespec *, uts, size_t, sigsetsize)
653 #endif
654 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
655               int, flags)
656 #if defined(TARGET_NR_nanosleep)
657 safe_syscall2(int, nanosleep, const struct timespec *, req,
658               struct timespec *, rem)
659 #endif
660 #if defined(TARGET_NR_clock_nanosleep) || \
661     defined(TARGET_NR_clock_nanosleep_time64)
662 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
663               const struct timespec *, req, struct timespec *, rem)
664 #endif
665 #ifdef __NR_ipc
666 #ifdef __s390x__
667 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
668               void *, ptr)
669 #else
670 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
671               void *, ptr, long, fifth)
672 #endif
673 #endif
674 #ifdef __NR_msgsnd
675 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
676               int, flags)
677 #endif
678 #ifdef __NR_msgrcv
679 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
680               long, msgtype, int, flags)
681 #endif
682 #ifdef __NR_semtimedop
683 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
684               unsigned, nsops, const struct timespec *, timeout)
685 #endif
686 #if defined(TARGET_NR_mq_timedsend) || \
687     defined(TARGET_NR_mq_timedsend_time64)
688 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
689               size_t, len, unsigned, prio, const struct timespec *, timeout)
690 #endif
691 #if defined(TARGET_NR_mq_timedreceive) || \
692     defined(TARGET_NR_mq_timedreceive_time64)
693 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
694               size_t, len, unsigned *, prio, const struct timespec *, timeout)
695 #endif
696 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
697 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
698               int, outfd, loff_t *, poutoff, size_t, length,
699               unsigned int, flags)
700 #endif
701 
702 /* We do ioctl like this rather than via safe_syscall3 to preserve the
703  * "third argument might be integer or pointer or not present" behaviour of
704  * the libc function.
705  */
706 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
707 /* Similarly for fcntl. Note that callers must always:
708  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
709  *  use the flock64 struct rather than unsuffixed flock
710  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
711  */
712 #ifdef __NR_fcntl64
713 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
714 #else
715 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
716 #endif
717 
718 static inline int host_to_target_sock_type(int host_type)
719 {
720     int target_type;
721 
722     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
723     case SOCK_DGRAM:
724         target_type = TARGET_SOCK_DGRAM;
725         break;
726     case SOCK_STREAM:
727         target_type = TARGET_SOCK_STREAM;
728         break;
729     default:
730         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
731         break;
732     }
733 
734 #if defined(SOCK_CLOEXEC)
735     if (host_type & SOCK_CLOEXEC) {
736         target_type |= TARGET_SOCK_CLOEXEC;
737     }
738 #endif
739 
740 #if defined(SOCK_NONBLOCK)
741     if (host_type & SOCK_NONBLOCK) {
742         target_type |= TARGET_SOCK_NONBLOCK;
743     }
744 #endif
745 
746     return target_type;
747 }
748 
/* Guest heap ("brk") bookkeeping used by target_set_brk()/do_brk():
 * the current break, the initial break, and the host-page-aligned top
 * of the region already backed by a mapping. */
static abi_ulong target_brk;
static abi_ulong target_original_brk;
static abi_ulong brk_page;
752 
753 void target_set_brk(abi_ulong new_brk)
754 {
755     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
756     brk_page = HOST_PAGE_ALIGN(target_brk);
757 }
758 
759 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
760 #define DEBUGF_BRK(message, args...)
761 
762 /* do_brk() must return target values and target errnos. */
763 abi_long do_brk(abi_ulong new_brk)
764 {
765     abi_long mapped_addr;
766     abi_ulong new_alloc_size;
767 
768     /* brk pointers are always untagged */
769 
770     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
771 
772     if (!new_brk) {
773         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
774         return target_brk;
775     }
776     if (new_brk < target_original_brk) {
777         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
778                    target_brk);
779         return target_brk;
780     }
781 
782     /* If the new brk is less than the highest page reserved to the
783      * target heap allocation, set it and we're almost done...  */
784     if (new_brk <= brk_page) {
785         /* Heap contents are initialized to zero, as for anonymous
786          * mapped pages.  */
787         if (new_brk > target_brk) {
788             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
789         }
790 	target_brk = new_brk;
791         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
792 	return target_brk;
793     }
794 
795     /* We need to allocate more memory after the brk... Note that
796      * we don't use MAP_FIXED because that will map over the top of
797      * any existing mapping (like the one with the host libc or qemu
798      * itself); instead we treat "mapped but at wrong address" as
799      * a failure and unmap again.
800      */
801     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
802     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
803                                         PROT_READ|PROT_WRITE,
804                                         MAP_ANON|MAP_PRIVATE, 0, 0));
805 
806     if (mapped_addr == brk_page) {
807         /* Heap contents are initialized to zero, as for anonymous
808          * mapped pages.  Technically the new pages are already
809          * initialized to zero since they *are* anonymous mapped
810          * pages, however we have to take care with the contents that
811          * come from the remaining part of the previous page: it may
812          * contains garbage data due to a previous heap usage (grown
813          * then shrunken).  */
814         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
815 
816         target_brk = new_brk;
817         brk_page = HOST_PAGE_ALIGN(target_brk);
818         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
819             target_brk);
820         return target_brk;
821     } else if (mapped_addr != -1) {
822         /* Mapped but at wrong address, meaning there wasn't actually
823          * enough space for this brk.
824          */
825         target_munmap(mapped_addr, new_alloc_size);
826         mapped_addr = -1;
827         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
828     }
829     else {
830         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
831     }
832 
833 #if defined(TARGET_ALPHA)
834     /* We (partially) emulate OSF/1 on Alpha, which requires we
835        return a proper errno, not an unchanged brk value.  */
836     return -TARGET_ENOMEM;
837 #endif
838     /* For everything else, return the previous break. */
839     return target_brk;
840 }
841 
842 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
843     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
844 static inline abi_long copy_from_user_fdset(fd_set *fds,
845                                             abi_ulong target_fds_addr,
846                                             int n)
847 {
848     int i, nw, j, k;
849     abi_ulong b, *target_fds;
850 
851     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
852     if (!(target_fds = lock_user(VERIFY_READ,
853                                  target_fds_addr,
854                                  sizeof(abi_ulong) * nw,
855                                  1)))
856         return -TARGET_EFAULT;
857 
858     FD_ZERO(fds);
859     k = 0;
860     for (i = 0; i < nw; i++) {
861         /* grab the abi_ulong */
862         __get_user(b, &target_fds[i]);
863         for (j = 0; j < TARGET_ABI_BITS; j++) {
864             /* check the bit inside the abi_ulong */
865             if ((b >> j) & 1)
866                 FD_SET(k, fds);
867             k++;
868         }
869     }
870 
871     unlock_user(target_fds, target_fds_addr, 0);
872 
873     return 0;
874 }
875 
876 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
877                                                  abi_ulong target_fds_addr,
878                                                  int n)
879 {
880     if (target_fds_addr) {
881         if (copy_from_user_fdset(fds, target_fds_addr, n))
882             return -TARGET_EFAULT;
883         *fds_ptr = fds;
884     } else {
885         *fds_ptr = NULL;
886     }
887     return 0;
888 }
889 
890 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
891                                           const fd_set *fds,
892                                           int n)
893 {
894     int i, nw, j, k;
895     abi_long v;
896     abi_ulong *target_fds;
897 
898     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
899     if (!(target_fds = lock_user(VERIFY_WRITE,
900                                  target_fds_addr,
901                                  sizeof(abi_ulong) * nw,
902                                  0)))
903         return -TARGET_EFAULT;
904 
905     k = 0;
906     for (i = 0; i < nw; i++) {
907         v = 0;
908         for (j = 0; j < TARGET_ABI_BITS; j++) {
909             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
910             k++;
911         }
912         __put_user(v, &target_fds[i]);
913     }
914 
915     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
916 
917     return 0;
918 }
919 #endif
920 
921 #if defined(__alpha__)
922 #define HOST_HZ 1024
923 #else
924 #define HOST_HZ 100
925 #endif
926 
927 static inline abi_long host_to_target_clock_t(long ticks)
928 {
929 #if HOST_HZ == TARGET_HZ
930     return ticks;
931 #else
932     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
933 #endif
934 }
935 
/*
 * Copy a host struct rusage out to the guest structure at target_addr,
 * converting each field to the guest ABI (tswapal handles both
 * endianness and the guest's abi_long width).
 * Returns 0 on success, -TARGET_EFAULT if the guest memory cannot be
 * written.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
965 
966 #ifdef TARGET_NR_setrlimit
967 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
968 {
969     abi_ulong target_rlim_swap;
970     rlim_t result;
971 
972     target_rlim_swap = tswapal(target_rlim);
973     if (target_rlim_swap == TARGET_RLIM_INFINITY)
974         return RLIM_INFINITY;
975 
976     result = target_rlim_swap;
977     if (target_rlim_swap != (rlim_t)result)
978         return RLIM_INFINITY;
979 
980     return result;
981 }
982 #endif
983 
984 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
985 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
986 {
987     abi_ulong target_rlim_swap;
988     abi_ulong result;
989 
990     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
991         target_rlim_swap = TARGET_RLIM_INFINITY;
992     else
993         target_rlim_swap = rlim;
994     result = tswapal(target_rlim_swap);
995 
996     return result;
997 }
998 #endif
999 
1000 static inline int target_to_host_resource(int code)
1001 {
1002     switch (code) {
1003     case TARGET_RLIMIT_AS:
1004         return RLIMIT_AS;
1005     case TARGET_RLIMIT_CORE:
1006         return RLIMIT_CORE;
1007     case TARGET_RLIMIT_CPU:
1008         return RLIMIT_CPU;
1009     case TARGET_RLIMIT_DATA:
1010         return RLIMIT_DATA;
1011     case TARGET_RLIMIT_FSIZE:
1012         return RLIMIT_FSIZE;
1013     case TARGET_RLIMIT_LOCKS:
1014         return RLIMIT_LOCKS;
1015     case TARGET_RLIMIT_MEMLOCK:
1016         return RLIMIT_MEMLOCK;
1017     case TARGET_RLIMIT_MSGQUEUE:
1018         return RLIMIT_MSGQUEUE;
1019     case TARGET_RLIMIT_NICE:
1020         return RLIMIT_NICE;
1021     case TARGET_RLIMIT_NOFILE:
1022         return RLIMIT_NOFILE;
1023     case TARGET_RLIMIT_NPROC:
1024         return RLIMIT_NPROC;
1025     case TARGET_RLIMIT_RSS:
1026         return RLIMIT_RSS;
1027     case TARGET_RLIMIT_RTPRIO:
1028         return RLIMIT_RTPRIO;
1029     case TARGET_RLIMIT_SIGPENDING:
1030         return RLIMIT_SIGPENDING;
1031     case TARGET_RLIMIT_STACK:
1032         return RLIMIT_STACK;
1033     default:
1034         return code;
1035     }
1036 }
1037 
/*
 * Copy a struct timeval in from guest memory at target_tv_addr,
 * byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unreadable.
 */
static inline abi_long copy_from_user_timeval(struct timeval *tv,
                                              abi_ulong target_tv_addr)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
1054 
/*
 * Copy a struct timeval out to guest memory at target_tv_addr,
 * byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unwritable.
 */
static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
                                            const struct timeval *tv)
{
    struct target_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
1071 
1072 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a 64-bit (__kernel_sock_timeval layout) timeval in from guest
 * memory at target_tv_addr, byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unreadable.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tv->tv_sec, &target_tv->tv_sec);
    __get_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 0);

    return 0;
}
1089 #endif
1090 
/*
 * Copy a 64-bit (__kernel_sock_timeval layout) timeval out to guest
 * memory at target_tv_addr, byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unwritable.
 */
static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
                                              const struct timeval *tv)
{
    struct target__kernel_sock_timeval *target_tv;

    if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tv->tv_sec, &target_tv->tv_sec);
    __put_user(tv->tv_usec, &target_tv->tv_usec);

    unlock_user_struct(target_tv, target_tv_addr, 1);

    return 0;
}
1107 
1108 #if defined(TARGET_NR_futex) || \
1109     defined(TARGET_NR_rt_sigtimedwait) || \
1110     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1111     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1112     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1113     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1114     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1115     defined(TARGET_NR_timer_settime) || \
1116     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Copy a (guest-width) struct timespec in from guest memory at
 * target_addr, byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unreadable.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1130 #endif
1131 
1132 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1133     defined(TARGET_NR_timer_settime64) || \
1134     defined(TARGET_NR_mq_timedsend_time64) || \
1135     defined(TARGET_NR_mq_timedreceive_time64) || \
1136     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1137     defined(TARGET_NR_clock_nanosleep_time64) || \
1138     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1139     defined(TARGET_NR_utimensat) || \
1140     defined(TARGET_NR_utimensat_time64) || \
1141     defined(TARGET_NR_semtimedop_time64) || \
1142     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Copy a 64-bit (__kernel_timespec layout) timespec in from guest
 * memory at target_addr, byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unreadable.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1158 #endif
1159 
/*
 * Copy a (guest-width) struct timespec out to guest memory at
 * target_addr, byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unwritable.
 */
static inline abi_long host_to_target_timespec(abi_ulong target_addr,
                                               struct timespec *host_ts)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
1173 
/*
 * Copy a 64-bit (__kernel_timespec layout) timespec out to guest memory
 * at target_addr, byte-swapping the fields as needed.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unwritable.
 */
static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
                                                 struct timespec *host_ts)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    __put_user(host_ts->tv_sec, &target_ts->tv_sec);
    __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 1);
    return 0;
}
1187 
1188 #if defined(TARGET_NR_gettimeofday)
/*
 * Copy a struct timezone out to guest memory at target_tz_addr,
 * byte-swapping the fields as needed (for gettimeofday).
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unwritable.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /* NOTE(review): the copy-in flag is 1 here although the struct is
     * only written; this looks unnecessary (compare the other
     * VERIFY_WRITE helpers, which pass 0) but should be harmless since
     * both fields are overwritten below — confirm before changing. */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
1205 #endif
1206 
1207 #if defined(TARGET_NR_settimeofday)
/*
 * Copy a struct timezone in from guest memory at target_tz_addr,
 * byte-swapping the fields as needed (for settimeofday).
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unreadable.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *target_tz;

    if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 0);

    return 0;
}
1224 #endif
1225 
1226 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1227 #include <mqueue.h>
1228 
/*
 * Copy a struct mq_attr in from guest memory at target_mq_attr_addr,
 * byte-swapping each field as needed (for mq_open and friends).
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unreadable.
 */
static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
                                              abi_ulong target_mq_attr_addr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_READ, target_mq_attr,
                          target_mq_attr_addr, 1))
        return -TARGET_EFAULT;

    __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);

    return 0;
}
1247 
/*
 * Copy a struct mq_attr out to guest memory at target_mq_attr_addr,
 * byte-swapping each field as needed (for mq_getattr and friends).
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is
 * unwritable.
 */
static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
                                            const struct mq_attr *attr)
{
    struct target_mq_attr *target_mq_attr;

    if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
                          target_mq_attr_addr, 0))
        return -TARGET_EFAULT;

    __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
    __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
    __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
    __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);

    unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);

    return 0;
}
1266 #endif
1267 
1268 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1269 /* do_select() must return target values and target errnos. */
1270 static abi_long do_select(int n,
1271                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1272                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1273 {
1274     fd_set rfds, wfds, efds;
1275     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1276     struct timeval tv;
1277     struct timespec ts, *ts_ptr;
1278     abi_long ret;
1279 
1280     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1281     if (ret) {
1282         return ret;
1283     }
1284     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1285     if (ret) {
1286         return ret;
1287     }
1288     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1289     if (ret) {
1290         return ret;
1291     }
1292 
1293     if (target_tv_addr) {
1294         if (copy_from_user_timeval(&tv, target_tv_addr))
1295             return -TARGET_EFAULT;
1296         ts.tv_sec = tv.tv_sec;
1297         ts.tv_nsec = tv.tv_usec * 1000;
1298         ts_ptr = &ts;
1299     } else {
1300         ts_ptr = NULL;
1301     }
1302 
1303     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1304                                   ts_ptr, NULL));
1305 
1306     if (!is_error(ret)) {
1307         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1308             return -TARGET_EFAULT;
1309         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1310             return -TARGET_EFAULT;
1311         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1312             return -TARGET_EFAULT;
1313 
1314         if (target_tv_addr) {
1315             tv.tv_sec = ts.tv_sec;
1316             tv.tv_usec = ts.tv_nsec / 1000;
1317             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1318                 return -TARGET_EFAULT;
1319             }
1320         }
1321     }
1322 
1323     return ret;
1324 }
1325 
1326 #if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Old-style select(2): the guest passes a single pointer to a block
 * holding all five arguments.  Unpack (and byte-swap) them and forward
 * to do_select().  Returns target values and target errnos.
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
1347 #endif
1348 #endif
1349 
1350 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1351 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1352                             abi_long arg4, abi_long arg5, abi_long arg6,
1353                             bool time64)
1354 {
1355     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1356     fd_set rfds, wfds, efds;
1357     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1358     struct timespec ts, *ts_ptr;
1359     abi_long ret;
1360 
1361     /*
1362      * The 6th arg is actually two args smashed together,
1363      * so we cannot use the C library.
1364      */
1365     sigset_t set;
1366     struct {
1367         sigset_t *set;
1368         size_t size;
1369     } sig, *sig_ptr;
1370 
1371     abi_ulong arg_sigset, arg_sigsize, *arg7;
1372     target_sigset_t *target_sigset;
1373 
1374     n = arg1;
1375     rfd_addr = arg2;
1376     wfd_addr = arg3;
1377     efd_addr = arg4;
1378     ts_addr = arg5;
1379 
1380     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1381     if (ret) {
1382         return ret;
1383     }
1384     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1385     if (ret) {
1386         return ret;
1387     }
1388     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1389     if (ret) {
1390         return ret;
1391     }
1392 
1393     /*
1394      * This takes a timespec, and not a timeval, so we cannot
1395      * use the do_select() helper ...
1396      */
1397     if (ts_addr) {
1398         if (time64) {
1399             if (target_to_host_timespec64(&ts, ts_addr)) {
1400                 return -TARGET_EFAULT;
1401             }
1402         } else {
1403             if (target_to_host_timespec(&ts, ts_addr)) {
1404                 return -TARGET_EFAULT;
1405             }
1406         }
1407             ts_ptr = &ts;
1408     } else {
1409         ts_ptr = NULL;
1410     }
1411 
1412     /* Extract the two packed args for the sigset */
1413     if (arg6) {
1414         sig_ptr = &sig;
1415         sig.size = SIGSET_T_SIZE;
1416 
1417         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1418         if (!arg7) {
1419             return -TARGET_EFAULT;
1420         }
1421         arg_sigset = tswapal(arg7[0]);
1422         arg_sigsize = tswapal(arg7[1]);
1423         unlock_user(arg7, arg6, 0);
1424 
1425         if (arg_sigset) {
1426             sig.set = &set;
1427             if (arg_sigsize != sizeof(*target_sigset)) {
1428                 /* Like the kernel, we enforce correct size sigsets */
1429                 return -TARGET_EINVAL;
1430             }
1431             target_sigset = lock_user(VERIFY_READ, arg_sigset,
1432                                       sizeof(*target_sigset), 1);
1433             if (!target_sigset) {
1434                 return -TARGET_EFAULT;
1435             }
1436             target_to_host_sigset(&set, target_sigset);
1437             unlock_user(target_sigset, arg_sigset, 0);
1438         } else {
1439             sig.set = NULL;
1440         }
1441     } else {
1442         sig_ptr = NULL;
1443     }
1444 
1445     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1446                                   ts_ptr, sig_ptr));
1447 
1448     if (!is_error(ret)) {
1449         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1450             return -TARGET_EFAULT;
1451         }
1452         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1453             return -TARGET_EFAULT;
1454         }
1455         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1456             return -TARGET_EFAULT;
1457         }
1458         if (time64) {
1459             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1460                 return -TARGET_EFAULT;
1461             }
1462         } else {
1463             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1464                 return -TARGET_EFAULT;
1465             }
1466         }
1467     }
1468     return ret;
1469 }
1470 #endif
1471 
1472 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1473     defined(TARGET_NR_ppoll_time64)
1474 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1475                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1476 {
1477     struct target_pollfd *target_pfd;
1478     unsigned int nfds = arg2;
1479     struct pollfd *pfd;
1480     unsigned int i;
1481     abi_long ret;
1482 
1483     pfd = NULL;
1484     target_pfd = NULL;
1485     if (nfds) {
1486         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1487             return -TARGET_EINVAL;
1488         }
1489         target_pfd = lock_user(VERIFY_WRITE, arg1,
1490                                sizeof(struct target_pollfd) * nfds, 1);
1491         if (!target_pfd) {
1492             return -TARGET_EFAULT;
1493         }
1494 
1495         pfd = alloca(sizeof(struct pollfd) * nfds);
1496         for (i = 0; i < nfds; i++) {
1497             pfd[i].fd = tswap32(target_pfd[i].fd);
1498             pfd[i].events = tswap16(target_pfd[i].events);
1499         }
1500     }
1501     if (ppoll) {
1502         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1503         target_sigset_t *target_set;
1504         sigset_t _set, *set = &_set;
1505 
1506         if (arg3) {
1507             if (time64) {
1508                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1509                     unlock_user(target_pfd, arg1, 0);
1510                     return -TARGET_EFAULT;
1511                 }
1512             } else {
1513                 if (target_to_host_timespec(timeout_ts, arg3)) {
1514                     unlock_user(target_pfd, arg1, 0);
1515                     return -TARGET_EFAULT;
1516                 }
1517             }
1518         } else {
1519             timeout_ts = NULL;
1520         }
1521 
1522         if (arg4) {
1523             if (arg5 != sizeof(target_sigset_t)) {
1524                 unlock_user(target_pfd, arg1, 0);
1525                 return -TARGET_EINVAL;
1526             }
1527 
1528             target_set = lock_user(VERIFY_READ, arg4,
1529                                    sizeof(target_sigset_t), 1);
1530             if (!target_set) {
1531                 unlock_user(target_pfd, arg1, 0);
1532                 return -TARGET_EFAULT;
1533             }
1534             target_to_host_sigset(set, target_set);
1535         } else {
1536             set = NULL;
1537         }
1538 
1539         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1540                                    set, SIGSET_T_SIZE));
1541 
1542         if (!is_error(ret) && arg3) {
1543             if (time64) {
1544                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1545                     return -TARGET_EFAULT;
1546                 }
1547             } else {
1548                 if (host_to_target_timespec(arg3, timeout_ts)) {
1549                     return -TARGET_EFAULT;
1550                 }
1551             }
1552         }
1553         if (arg4) {
1554             unlock_user(target_set, arg4, 0);
1555         }
1556     } else {
1557           struct timespec ts, *pts;
1558 
1559           if (arg3 >= 0) {
1560               /* Convert ms to secs, ns */
1561               ts.tv_sec = arg3 / 1000;
1562               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1563               pts = &ts;
1564           } else {
1565               /* -ve poll() timeout means "infinite" */
1566               pts = NULL;
1567           }
1568           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1569     }
1570 
1571     if (!is_error(ret)) {
1572         for (i = 0; i < nfds; i++) {
1573             target_pfd[i].revents = tswap16(pfd[i].revents);
1574         }
1575     }
1576     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1577     return ret;
1578 }
1579 #endif
1580 
/*
 * Invoke the host pipe2() when the build detected it (CONFIG_PIPE2);
 * otherwise report ENOSYS.  Returns the raw host result (-1/errno
 * convention), to be converted by the caller.
 */
static abi_long do_pipe2(int host_pipe[], int flags)
{
#ifdef CONFIG_PIPE2
    return pipe2(host_pipe, flags);
#else
    return -ENOSYS;
#endif
}
1589 
/*
 * Common implementation for the pipe and pipe2 syscalls.  Creates the
 * host pipe and writes the two descriptors back to the guest at
 * pipedes.  flags is forwarded to pipe2(); is_pipe2 distinguishes the
 * two guest entry points because some targets have a special calling
 * convention for the original pipe only.
 */
static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
        /* These targets return fd[0] as the syscall result and deliver
           fd[1] in a second CPU register instead of through memory.  */
#if defined(TARGET_ALPHA)
        ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: store both descriptors into guest memory. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1623 
/*
 * Convert a guest ip_mreq/ip_mreqn (multicast group request) at
 * target_addr into a host struct ip_mreqn.  The two addresses are kept
 * as-is (they are carried in network byte order); imr_ifindex is only
 * present, and only swapped, when the guest supplied the full
 * ip_mreqn-sized structure.
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
                                              abi_ulong target_addr,
                                              socklen_t len)
{
    struct target_ip_mreqn *target_smreqn;

    target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_smreqn)
        return -TARGET_EFAULT;
    mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
    mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
    if (len == sizeof(struct target_ip_mreqn))
        mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
    unlock_user(target_smreqn, target_addr, 0);

    return 0;
}
1641 
/*
 * Convert a guest sockaddr at target_addr (len bytes) into a host
 * struct sockaddr.  An fd-specific translation hook, if registered for
 * fd, takes precedence.  The family field is byte-swapped; AF_NETLINK
 * and AF_PACKET get additional per-field swaps, and AF_UNIX paths get
 * the length fixed up (see below).
 * Returns 0 on success, -TARGET_EFAULT on an unreadable guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* NOTE(review): this peeks at cp[len], one byte past the
             * length that was locked above — confirm the locked guest
             * area is guaranteed to cover that extra byte. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1698 
/*
 * Copy a host struct sockaddr out to the guest buffer at target_addr,
 * byte-swapping the family field and, for AF_NETLINK, AF_PACKET and
 * AF_INET6, the additional fields that need it.  Only swaps fields that
 * the supplied len actually covers.  len == 0 is a successful no-op.
 * Returns 0 on success, -TARGET_EFAULT on an unwritable guest address.
 */
static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
                                               struct sockaddr *addr,
                                               socklen_t len)
{
    struct target_sockaddr *target_saddr;

    if (len == 0) {
        return 0;
    }
    assert(addr);

    target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
    if (!target_saddr)
        return -TARGET_EFAULT;
    memcpy(target_saddr, addr, len);
    if (len >= offsetof(struct target_sockaddr, sa_family) +
        sizeof(target_saddr->sa_family)) {
        target_saddr->sa_family = tswap16(addr->sa_family);
    }
    if (addr->sa_family == AF_NETLINK &&
        len >= sizeof(struct target_sockaddr_nl)) {
        struct target_sockaddr_nl *target_nl =
               (struct target_sockaddr_nl *)target_saddr;
        target_nl->nl_pid = tswap32(target_nl->nl_pid);
        target_nl->nl_groups = tswap32(target_nl->nl_groups);
    } else if (addr->sa_family == AF_PACKET) {
        /* NOTE(review): this uses the host sockaddr_ll layout to locate
         * the fields in the guest buffer — assumes host and target
         * layouts agree for these members; confirm. */
        struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
        target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
        target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
    } else if (addr->sa_family == AF_INET6 &&
               len >= sizeof(struct target_sockaddr_in6)) {
        struct target_sockaddr_in6 *target_in6 =
               (struct target_sockaddr_in6 *)target_saddr;
        target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, len);

    return 0;
}
1738 
/*
 * Convert the guest ancillary data (control messages) described by
 * target_msgh into host format, writing into the host control buffer
 * already attached to msgh (allocated by the caller).
 *
 * Explicitly converted payloads: SCM_RIGHTS (array of ints) and
 * SCM_CREDENTIALS (struct ucred). Any other payload is byte-copied
 * unchanged after logging it as unimplemented.
 *
 * On return msgh->msg_controllen is set to the number of host control
 * bytes actually produced. Returns 0 on success, or -TARGET_EFAULT if
 * the guest control buffer cannot be locked.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;    /* host control bytes produced so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Less than one full guest header means there is nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg (header excluded). */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET may differ numerically between guest and host. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: byteswap each int in the array. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Sender credentials: convert field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else {
            /* Unknown payload: copy verbatim. This assumes the guest
             * and host layouts match for that type. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    /* Read-only lock: nothing to copy back to the guest. */
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1824 
/*
 * Convert host ancillary data (control messages) attached to msgh into
 * guest format, writing them into the guest control buffer described
 * by target_msgh (msg_control / msg_controllen).
 *
 * Explicitly converted payloads: SCM_RIGHTS, SO_TIMESTAMP,
 * SCM_CREDENTIALS, IP_TTL, IP_RECVERR, IPV6_HOPLIMIT, IPV6_RECVERR.
 * Any other payload is byte-copied (zero-padded if the guest needs
 * more space) after being logged as unimplemented.
 *
 * A too-small guest buffer is a guest problem and is reported via the
 * MSG_CTRUNC flag, mirroring the kernel's put_cmsg() behaviour.
 * On return target_msgh->msg_controllen holds the number of guest
 * control bytes written. Returns 0 on success or -TARGET_EFAULT if
 * the guest control buffer cannot be locked.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;    /* guest control bytes written so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer too small for even one header: report zero bytes. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this host cmsg (header excluded). */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_space;
        int tgt_len;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET may differ numerically between host and guest. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval differs in size between host and guest. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough room left in the guest buffer for the full
         * payload: flag the truncation and clamp. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Passed file descriptors: byteswap each int. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* NOTE(review): this passes a *host* pointer where
                 * host_to_target_sockaddr() expects a guest address
                 * for lock_user(); seems to rely on the direct
                 * guest/host mapping case — confirm. */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* NOTE(review): same host-pointer-as-guest-address
                 * pattern as the IP_RECVERR case above — confirm. */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            /* Unknown payload: copy verbatim, zero-padding if the
             * guest needs more bytes than the host supplied. */
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the guest space consumed, clamped to what is
         * actually left in the guest buffer. */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    /* Copy back exactly the bytes we wrote into the guest buffer. */
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2056 
2057 /* do_setsockopt() Must return target values and target errnos. */
2058 static abi_long do_setsockopt(int sockfd, int level, int optname,
2059                               abi_ulong optval_addr, socklen_t optlen)
2060 {
2061     abi_long ret;
2062     int val;
2063     struct ip_mreqn *ip_mreq;
2064     struct ip_mreq_source *ip_mreq_source;
2065 
2066     switch(level) {
2067     case SOL_TCP:
2068     case SOL_UDP:
2069         /* TCP and UDP options all take an 'int' value.  */
2070         if (optlen < sizeof(uint32_t))
2071             return -TARGET_EINVAL;
2072 
2073         if (get_user_u32(val, optval_addr))
2074             return -TARGET_EFAULT;
2075         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2076         break;
2077     case SOL_IP:
2078         switch(optname) {
2079         case IP_TOS:
2080         case IP_TTL:
2081         case IP_HDRINCL:
2082         case IP_ROUTER_ALERT:
2083         case IP_RECVOPTS:
2084         case IP_RETOPTS:
2085         case IP_PKTINFO:
2086         case IP_MTU_DISCOVER:
2087         case IP_RECVERR:
2088         case IP_RECVTTL:
2089         case IP_RECVTOS:
2090 #ifdef IP_FREEBIND
2091         case IP_FREEBIND:
2092 #endif
2093         case IP_MULTICAST_TTL:
2094         case IP_MULTICAST_LOOP:
2095             val = 0;
2096             if (optlen >= sizeof(uint32_t)) {
2097                 if (get_user_u32(val, optval_addr))
2098                     return -TARGET_EFAULT;
2099             } else if (optlen >= 1) {
2100                 if (get_user_u8(val, optval_addr))
2101                     return -TARGET_EFAULT;
2102             }
2103             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2104             break;
2105         case IP_ADD_MEMBERSHIP:
2106         case IP_DROP_MEMBERSHIP:
2107             if (optlen < sizeof (struct target_ip_mreq) ||
2108                 optlen > sizeof (struct target_ip_mreqn))
2109                 return -TARGET_EINVAL;
2110 
2111             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2112             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2113             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2114             break;
2115 
2116         case IP_BLOCK_SOURCE:
2117         case IP_UNBLOCK_SOURCE:
2118         case IP_ADD_SOURCE_MEMBERSHIP:
2119         case IP_DROP_SOURCE_MEMBERSHIP:
2120             if (optlen != sizeof (struct target_ip_mreq_source))
2121                 return -TARGET_EINVAL;
2122 
2123             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2124             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2125             unlock_user (ip_mreq_source, optval_addr, 0);
2126             break;
2127 
2128         default:
2129             goto unimplemented;
2130         }
2131         break;
2132     case SOL_IPV6:
2133         switch (optname) {
2134         case IPV6_MTU_DISCOVER:
2135         case IPV6_MTU:
2136         case IPV6_V6ONLY:
2137         case IPV6_RECVPKTINFO:
2138         case IPV6_UNICAST_HOPS:
2139         case IPV6_MULTICAST_HOPS:
2140         case IPV6_MULTICAST_LOOP:
2141         case IPV6_RECVERR:
2142         case IPV6_RECVHOPLIMIT:
2143         case IPV6_2292HOPLIMIT:
2144         case IPV6_CHECKSUM:
2145         case IPV6_ADDRFORM:
2146         case IPV6_2292PKTINFO:
2147         case IPV6_RECVTCLASS:
2148         case IPV6_RECVRTHDR:
2149         case IPV6_2292RTHDR:
2150         case IPV6_RECVHOPOPTS:
2151         case IPV6_2292HOPOPTS:
2152         case IPV6_RECVDSTOPTS:
2153         case IPV6_2292DSTOPTS:
2154         case IPV6_TCLASS:
2155         case IPV6_ADDR_PREFERENCES:
2156 #ifdef IPV6_RECVPATHMTU
2157         case IPV6_RECVPATHMTU:
2158 #endif
2159 #ifdef IPV6_TRANSPARENT
2160         case IPV6_TRANSPARENT:
2161 #endif
2162 #ifdef IPV6_FREEBIND
2163         case IPV6_FREEBIND:
2164 #endif
2165 #ifdef IPV6_RECVORIGDSTADDR
2166         case IPV6_RECVORIGDSTADDR:
2167 #endif
2168             val = 0;
2169             if (optlen < sizeof(uint32_t)) {
2170                 return -TARGET_EINVAL;
2171             }
2172             if (get_user_u32(val, optval_addr)) {
2173                 return -TARGET_EFAULT;
2174             }
2175             ret = get_errno(setsockopt(sockfd, level, optname,
2176                                        &val, sizeof(val)));
2177             break;
2178         case IPV6_PKTINFO:
2179         {
2180             struct in6_pktinfo pki;
2181 
2182             if (optlen < sizeof(pki)) {
2183                 return -TARGET_EINVAL;
2184             }
2185 
2186             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2187                 return -TARGET_EFAULT;
2188             }
2189 
2190             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2191 
2192             ret = get_errno(setsockopt(sockfd, level, optname,
2193                                        &pki, sizeof(pki)));
2194             break;
2195         }
2196         case IPV6_ADD_MEMBERSHIP:
2197         case IPV6_DROP_MEMBERSHIP:
2198         {
2199             struct ipv6_mreq ipv6mreq;
2200 
2201             if (optlen < sizeof(ipv6mreq)) {
2202                 return -TARGET_EINVAL;
2203             }
2204 
2205             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2206                 return -TARGET_EFAULT;
2207             }
2208 
2209             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2210 
2211             ret = get_errno(setsockopt(sockfd, level, optname,
2212                                        &ipv6mreq, sizeof(ipv6mreq)));
2213             break;
2214         }
2215         default:
2216             goto unimplemented;
2217         }
2218         break;
2219     case SOL_ICMPV6:
2220         switch (optname) {
2221         case ICMPV6_FILTER:
2222         {
2223             struct icmp6_filter icmp6f;
2224 
2225             if (optlen > sizeof(icmp6f)) {
2226                 optlen = sizeof(icmp6f);
2227             }
2228 
2229             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2230                 return -TARGET_EFAULT;
2231             }
2232 
2233             for (val = 0; val < 8; val++) {
2234                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2235             }
2236 
2237             ret = get_errno(setsockopt(sockfd, level, optname,
2238                                        &icmp6f, optlen));
2239             break;
2240         }
2241         default:
2242             goto unimplemented;
2243         }
2244         break;
2245     case SOL_RAW:
2246         switch (optname) {
2247         case ICMP_FILTER:
2248         case IPV6_CHECKSUM:
2249             /* those take an u32 value */
2250             if (optlen < sizeof(uint32_t)) {
2251                 return -TARGET_EINVAL;
2252             }
2253 
2254             if (get_user_u32(val, optval_addr)) {
2255                 return -TARGET_EFAULT;
2256             }
2257             ret = get_errno(setsockopt(sockfd, level, optname,
2258                                        &val, sizeof(val)));
2259             break;
2260 
2261         default:
2262             goto unimplemented;
2263         }
2264         break;
2265 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2266     case SOL_ALG:
2267         switch (optname) {
2268         case ALG_SET_KEY:
2269         {
2270             char *alg_key = g_malloc(optlen);
2271 
2272             if (!alg_key) {
2273                 return -TARGET_ENOMEM;
2274             }
2275             if (copy_from_user(alg_key, optval_addr, optlen)) {
2276                 g_free(alg_key);
2277                 return -TARGET_EFAULT;
2278             }
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        alg_key, optlen));
2281             g_free(alg_key);
2282             break;
2283         }
2284         case ALG_SET_AEAD_AUTHSIZE:
2285         {
2286             ret = get_errno(setsockopt(sockfd, level, optname,
2287                                        NULL, optlen));
2288             break;
2289         }
2290         default:
2291             goto unimplemented;
2292         }
2293         break;
2294 #endif
2295     case TARGET_SOL_SOCKET:
2296         switch (optname) {
2297         case TARGET_SO_RCVTIMEO:
2298         {
2299                 struct timeval tv;
2300 
2301                 optname = SO_RCVTIMEO;
2302 
2303 set_timeout:
2304                 if (optlen != sizeof(struct target_timeval)) {
2305                     return -TARGET_EINVAL;
2306                 }
2307 
2308                 if (copy_from_user_timeval(&tv, optval_addr)) {
2309                     return -TARGET_EFAULT;
2310                 }
2311 
2312                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2313                                 &tv, sizeof(tv)));
2314                 return ret;
2315         }
2316         case TARGET_SO_SNDTIMEO:
2317                 optname = SO_SNDTIMEO;
2318                 goto set_timeout;
2319         case TARGET_SO_ATTACH_FILTER:
2320         {
2321                 struct target_sock_fprog *tfprog;
2322                 struct target_sock_filter *tfilter;
2323                 struct sock_fprog fprog;
2324                 struct sock_filter *filter;
2325                 int i;
2326 
2327                 if (optlen != sizeof(*tfprog)) {
2328                     return -TARGET_EINVAL;
2329                 }
2330                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2331                     return -TARGET_EFAULT;
2332                 }
2333                 if (!lock_user_struct(VERIFY_READ, tfilter,
2334                                       tswapal(tfprog->filter), 0)) {
2335                     unlock_user_struct(tfprog, optval_addr, 1);
2336                     return -TARGET_EFAULT;
2337                 }
2338 
2339                 fprog.len = tswap16(tfprog->len);
2340                 filter = g_try_new(struct sock_filter, fprog.len);
2341                 if (filter == NULL) {
2342                     unlock_user_struct(tfilter, tfprog->filter, 1);
2343                     unlock_user_struct(tfprog, optval_addr, 1);
2344                     return -TARGET_ENOMEM;
2345                 }
2346                 for (i = 0; i < fprog.len; i++) {
2347                     filter[i].code = tswap16(tfilter[i].code);
2348                     filter[i].jt = tfilter[i].jt;
2349                     filter[i].jf = tfilter[i].jf;
2350                     filter[i].k = tswap32(tfilter[i].k);
2351                 }
2352                 fprog.filter = filter;
2353 
2354                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2355                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2356                 g_free(filter);
2357 
2358                 unlock_user_struct(tfilter, tfprog->filter, 1);
2359                 unlock_user_struct(tfprog, optval_addr, 1);
2360                 return ret;
2361         }
2362 	case TARGET_SO_BINDTODEVICE:
2363 	{
2364 		char *dev_ifname, *addr_ifname;
2365 
2366 		if (optlen > IFNAMSIZ - 1) {
2367 		    optlen = IFNAMSIZ - 1;
2368 		}
2369 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2370 		if (!dev_ifname) {
2371 		    return -TARGET_EFAULT;
2372 		}
2373 		optname = SO_BINDTODEVICE;
2374 		addr_ifname = alloca(IFNAMSIZ);
2375 		memcpy(addr_ifname, dev_ifname, optlen);
2376 		addr_ifname[optlen] = 0;
2377 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2378                                            addr_ifname, optlen));
2379 		unlock_user (dev_ifname, optval_addr, 0);
2380 		return ret;
2381 	}
2382         case TARGET_SO_LINGER:
2383         {
2384                 struct linger lg;
2385                 struct target_linger *tlg;
2386 
2387                 if (optlen != sizeof(struct target_linger)) {
2388                     return -TARGET_EINVAL;
2389                 }
2390                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2391                     return -TARGET_EFAULT;
2392                 }
2393                 __get_user(lg.l_onoff, &tlg->l_onoff);
2394                 __get_user(lg.l_linger, &tlg->l_linger);
2395                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2396                                 &lg, sizeof(lg)));
2397                 unlock_user_struct(tlg, optval_addr, 0);
2398                 return ret;
2399         }
2400             /* Options with 'int' argument.  */
2401         case TARGET_SO_DEBUG:
2402 		optname = SO_DEBUG;
2403 		break;
2404         case TARGET_SO_REUSEADDR:
2405 		optname = SO_REUSEADDR;
2406 		break;
2407 #ifdef SO_REUSEPORT
2408         case TARGET_SO_REUSEPORT:
2409                 optname = SO_REUSEPORT;
2410                 break;
2411 #endif
2412         case TARGET_SO_TYPE:
2413 		optname = SO_TYPE;
2414 		break;
2415         case TARGET_SO_ERROR:
2416 		optname = SO_ERROR;
2417 		break;
2418         case TARGET_SO_DONTROUTE:
2419 		optname = SO_DONTROUTE;
2420 		break;
2421         case TARGET_SO_BROADCAST:
2422 		optname = SO_BROADCAST;
2423 		break;
2424         case TARGET_SO_SNDBUF:
2425 		optname = SO_SNDBUF;
2426 		break;
2427         case TARGET_SO_SNDBUFFORCE:
2428                 optname = SO_SNDBUFFORCE;
2429                 break;
2430         case TARGET_SO_RCVBUF:
2431 		optname = SO_RCVBUF;
2432 		break;
2433         case TARGET_SO_RCVBUFFORCE:
2434                 optname = SO_RCVBUFFORCE;
2435                 break;
2436         case TARGET_SO_KEEPALIVE:
2437 		optname = SO_KEEPALIVE;
2438 		break;
2439         case TARGET_SO_OOBINLINE:
2440 		optname = SO_OOBINLINE;
2441 		break;
2442         case TARGET_SO_NO_CHECK:
2443 		optname = SO_NO_CHECK;
2444 		break;
2445         case TARGET_SO_PRIORITY:
2446 		optname = SO_PRIORITY;
2447 		break;
2448 #ifdef SO_BSDCOMPAT
2449         case TARGET_SO_BSDCOMPAT:
2450 		optname = SO_BSDCOMPAT;
2451 		break;
2452 #endif
2453         case TARGET_SO_PASSCRED:
2454 		optname = SO_PASSCRED;
2455 		break;
2456         case TARGET_SO_PASSSEC:
2457                 optname = SO_PASSSEC;
2458                 break;
2459         case TARGET_SO_TIMESTAMP:
2460 		optname = SO_TIMESTAMP;
2461 		break;
2462         case TARGET_SO_RCVLOWAT:
2463 		optname = SO_RCVLOWAT;
2464 		break;
2465         default:
2466             goto unimplemented;
2467         }
2468 	if (optlen < sizeof(uint32_t))
2469             return -TARGET_EINVAL;
2470 
2471 	if (get_user_u32(val, optval_addr))
2472             return -TARGET_EFAULT;
2473 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2474         break;
2475 #ifdef SOL_NETLINK
2476     case SOL_NETLINK:
2477         switch (optname) {
2478         case NETLINK_PKTINFO:
2479         case NETLINK_ADD_MEMBERSHIP:
2480         case NETLINK_DROP_MEMBERSHIP:
2481         case NETLINK_BROADCAST_ERROR:
2482         case NETLINK_NO_ENOBUFS:
2483 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2484         case NETLINK_LISTEN_ALL_NSID:
2485         case NETLINK_CAP_ACK:
2486 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2487 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2488         case NETLINK_EXT_ACK:
2489 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2490 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2491         case NETLINK_GET_STRICT_CHK:
2492 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2493             break;
2494         default:
2495             goto unimplemented;
2496         }
2497         val = 0;
2498         if (optlen < sizeof(uint32_t)) {
2499             return -TARGET_EINVAL;
2500         }
2501         if (get_user_u32(val, optval_addr)) {
2502             return -TARGET_EFAULT;
2503         }
2504         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2505                                    sizeof(val)));
2506         break;
2507 #endif /* SOL_NETLINK */
2508     default:
2509     unimplemented:
2510         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2511                       level, optname);
2512         ret = -TARGET_ENOPROTOOPT;
2513     }
2514     return ret;
2515 }
2516 
/* do_getsockopt() Must return target values and target errnos. */
/*
 * Emulate getsockopt(2): translate the guest's (level, optname) to host
 * values, invoke the host syscall, then convert the result back into the
 * guest buffer at optval_addr and update the guest length word at optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

/* Shared with TARGET_SO_SNDTIMEO below; optname is set before jumping here. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            /* Clamp the reported length to the target's timeval size. */
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Convert host struct ucred field-by-field into the target's. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            /* The security label is a string; let the host fill it in place. */
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        /* Unknown SOL_SOCKET options are passed through as plain ints. */
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) is used here where sizeof(val) was
         * presumably meant; harmless only while sizeof(socklen_t) ==
         * sizeof(int) — confirm. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        if (optname == SO_TYPE) {
            val = host_to_target_sock_type(val);
        }
        if (len > lv)
            len = lv;
        /* Write back 4 bytes when the guest asked for them, else 1 byte. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* A short guest buffer gets a single byte if the value fits. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same short-buffer byte write-back as the SOL_IP case above. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): this early return skips unlock_user(results,...)
             * below, leaking the locked user buffer — confirm and fix. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2934 
2935 /* Convert target low/high pair representing file offset into the host
2936  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2937  * as the kernel doesn't handle them either.
2938  */
2939 static void target_to_host_low_high(abi_ulong tlow,
2940                                     abi_ulong thigh,
2941                                     unsigned long *hlow,
2942                                     unsigned long *hhigh)
2943 {
2944     uint64_t off = tlow |
2945         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2946         TARGET_LONG_BITS / 2;
2947 
2948     *hlow = off;
2949     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2950 }
2951 
/*
 * Build a host struct iovec array from a guest iovec array at @target_addr,
 * locking each guest buffer into host memory.  On failure returns NULL with
 * errno set (0 for an empty vector); on success the caller must release the
 * result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    /* count == 0 is legal: report success via errno == 0, NULL vector. */
    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address was bad, all later entries are zero-length. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every guest buffer locked before entry i failed. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3039 
3040 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3041                          abi_ulong count, int copy)
3042 {
3043     struct target_iovec *target_vec;
3044     int i;
3045 
3046     target_vec = lock_user(VERIFY_READ, target_addr,
3047                            count * sizeof(struct target_iovec), 1);
3048     if (target_vec) {
3049         for (i = 0; i < count; i++) {
3050             abi_ulong base = tswapal(target_vec[i].iov_base);
3051             abi_long len = tswapal(target_vec[i].iov_len);
3052             if (len < 0) {
3053                 break;
3054             }
3055             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3056         }
3057         unlock_user(target_vec, target_addr, 0);
3058     }
3059 
3060     g_free(vec);
3061 }
3062 
3063 static inline int target_to_host_sock_type(int *type)
3064 {
3065     int host_type = 0;
3066     int target_type = *type;
3067 
3068     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3069     case TARGET_SOCK_DGRAM:
3070         host_type = SOCK_DGRAM;
3071         break;
3072     case TARGET_SOCK_STREAM:
3073         host_type = SOCK_STREAM;
3074         break;
3075     default:
3076         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3077         break;
3078     }
3079     if (target_type & TARGET_SOCK_CLOEXEC) {
3080 #if defined(SOCK_CLOEXEC)
3081         host_type |= SOCK_CLOEXEC;
3082 #else
3083         return -TARGET_EINVAL;
3084 #endif
3085     }
3086     if (target_type & TARGET_SOCK_NONBLOCK) {
3087 #if defined(SOCK_NONBLOCK)
3088         host_type |= SOCK_NONBLOCK;
3089 #elif !defined(O_NONBLOCK)
3090         return -TARGET_EINVAL;
3091 #endif
3092     }
3093     *type = host_type;
3094     return 0;
3095 }
3096 
3097 /* Try to emulate socket type flags after socket creation.  */
3098 static int sock_flags_fixup(int fd, int target_type)
3099 {
3100 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3101     if (target_type & TARGET_SOCK_NONBLOCK) {
3102         int flags = fcntl(fd, F_GETFL);
3103         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3104             close(fd);
3105             return -TARGET_EINVAL;
3106         }
3107     }
3108 #endif
3109     return fd;
3110 }
3111 
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    /* Translate TARGET_SOCK_* type and flag bits to the host values. */
    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only a whitelist of netlink protocols is emulated. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet sockets carry the protocol in guest byte order; convert. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Emulate SOCK_NONBLOCK via fcntl() if the host lacks the flag. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Register per-protocol data translators for netlink fds. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: the whitelist above rejects anything else. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3165 
3166 /* do_bind() Must return target values and target errnos. */
3167 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3168                         socklen_t addrlen)
3169 {
3170     void *addr;
3171     abi_long ret;
3172 
3173     if ((int)addrlen < 0) {
3174         return -TARGET_EINVAL;
3175     }
3176 
3177     addr = alloca(addrlen+1);
3178 
3179     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3180     if (ret)
3181         return ret;
3182 
3183     return get_errno(bind(sockfd, addr, addrlen));
3184 }
3185 
3186 /* do_connect() Must return target values and target errnos. */
3187 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3188                            socklen_t addrlen)
3189 {
3190     void *addr;
3191     abi_long ret;
3192 
3193     if ((int)addrlen < 0) {
3194         return -TARGET_EINVAL;
3195     }
3196 
3197     addr = alloca(addrlen+1);
3198 
3199     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3200     if (ret)
3201         return ret;
3202 
3203     return get_errno(safe_connect(sockfd, addr, addrlen));
3204 }
3205 
/* do_sendrecvmsg_locked() Must return target values and target errnos. */
/*
 * Core of sendmsg/recvmsg emulation: @msgp is the already-locked guest
 * msghdr.  When @send is nonzero, perform sendmsg; otherwise recvmsg.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest's control buffer size — presumably because host
     * cmsgs can be larger than the target representation; confirm. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        goto out2;
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate payload in a scratch copy (first iov entry only). */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            /* Convert guest control messages before sending. */
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            } else {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Copy the updated header fields back to the guest. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name from above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    unlock_iovec(vec, target_vec, count, !send);
out2:
    return ret;
}
3312 
3313 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3314                                int flags, int send)
3315 {
3316     abi_long ret;
3317     struct target_msghdr *msgp;
3318 
3319     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3320                           msgp,
3321                           target_msg,
3322                           send ? 1 : 0)) {
3323         return -TARGET_EFAULT;
3324     }
3325     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3326     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3327     return ret;
3328 }
3329 
3330 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3331  * so it might not have this *mmsg-specific flag either.
3332  */
3333 #ifndef MSG_WAITFORONE
3334 #define MSG_WAITFORONE 0x10000
3335 #endif
3336 
/*
 * Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked().
 * Returns the number of datagrams processed if any succeeded, otherwise
 * the (target) error from the first failing message.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently cap the vector length, matching kernel behavior here. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Record the per-message byte count for the guest. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3376 
3377 /* do_accept4() Must return target values and target errnos. */
3378 static abi_long do_accept4(int fd, abi_ulong target_addr,
3379                            abi_ulong target_addrlen_addr, int flags)
3380 {
3381     socklen_t addrlen, ret_addrlen;
3382     void *addr;
3383     abi_long ret;
3384     int host_flags;
3385 
3386     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3387 
3388     if (target_addr == 0) {
3389         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3390     }
3391 
3392     /* linux returns EFAULT if addrlen pointer is invalid */
3393     if (get_user_u32(addrlen, target_addrlen_addr))
3394         return -TARGET_EFAULT;
3395 
3396     if ((int)addrlen < 0) {
3397         return -TARGET_EINVAL;
3398     }
3399 
3400     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3401         return -TARGET_EFAULT;
3402     }
3403 
3404     addr = alloca(addrlen);
3405 
3406     ret_addrlen = addrlen;
3407     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3408     if (!is_error(ret)) {
3409         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3410         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3411             ret = -TARGET_EFAULT;
3412         }
3413     }
3414     return ret;
3415 }
3416 
3417 /* do_getpeername() Must return target values and target errnos. */
3418 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3419                                abi_ulong target_addrlen_addr)
3420 {
3421     socklen_t addrlen, ret_addrlen;
3422     void *addr;
3423     abi_long ret;
3424 
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getsockname() Must return target values and target errnos. */
3450 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_socketpair() Must return target values and target errnos. */
3482 static abi_long do_socketpair(int domain, int type, int protocol,
3483                               abi_ulong target_tab_addr)
3484 {
3485     int tab[2];
3486     abi_long ret;
3487 
3488     target_to_host_sock_type(&type);
3489 
3490     ret = get_errno(socketpair(domain, type, protocol, tab));
3491     if (!is_error(ret)) {
3492         if (put_user_s32(tab[0], target_tab_addr)
3493             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3494             ret = -TARGET_EFAULT;
3495     }
3496     return ret;
3497 }
3498 
/* do_sendto() Must return target values and target errnos.
 *
 * Sends 'len' bytes of guest buffer 'msg' on 'fd'.  If 'target_addr' is
 * non-zero it is converted to a host sockaddr and passed through; if an
 * fd translator is registered for 'fd', the payload is rewritten in a
 * private copy before sending.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    /* addrlen is unsigned; reject values that are negative as an int. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate the payload in a heap copy so the locked guest
         * buffer is left untouched; 'copy_msg' remembers the original
         * mapping for the unlock below. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: extra byte for conversions that may need to extend the
         * sockaddr (presumably e.g. NUL-terminating an AF_UNIX path in
         * target_to_host_sockaddr) — TODO confirm against that helper. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we sent from a translated heap copy, free it and restore the
     * original locked pointer so the unlock matches the lock. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3542 
/* do_recvfrom() Must return target values and target errnos.
 *
 * Receives up to 'len' bytes into guest buffer 'msg' (which may be 0,
 * in which case a NULL host buffer is passed through to recvfrom).
 * When 'target_addr' is non-zero the peer address and its length are
 * copied back to the guest as well.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        /* addrlen is unsigned; reject values negative as an int. */
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Rewrite the received payload in place for this fd type. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the guest-provided buffer size, but
             * report the full kernel-returned length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
        /* Error path: unlock with length 0 so nothing is copied back. */
fail:
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3603 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: 'num' selects the
 * socket operation and 'vptr' points at a guest array of abi_long
 * arguments, whose count per operation is given by nargs[] below.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3696 
#define N_SHM_REGIONS	32

/* Fixed-size table recording SysV shared-memory segments mapped into the
 * guest, so a later operation on the same address can recover the size. */
static struct shm_region {
    abi_ulong start;    /* guest address the segment is attached at */
    abi_ulong size;     /* length of the mapping in bytes */
    bool in_use;        /* slot occupied */
} shm_regions[N_SHM_REGIONS];
3704 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;
#if TARGET_ABI_BITS == 32
  /* padding after each time field on 32-bit ABIs (asm-generic layout) */
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3723 
/* Convert the sem_perm member of a guest semid64_ds at target_addr into
 * *host_ip.  Returns 0 on success, -TARGET_EFAULT if the guest struct
 * cannot be read. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* 'mode' is 32 bits wide on these targets, 16 bits elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
    /* likewise '__seq' is 32 bits only on PPC */
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3751 
/* Convert *host_ip into the sem_perm member of the guest semid64_ds at
 * target_addr.  Returns 0 on success, -TARGET_EFAULT if the guest struct
 * cannot be written. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* 'mode' is 32 bits wide on these targets, 16 bits elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
    /* likewise '__seq' is 32 bits only on PPC */
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3779 
3780 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3781                                                abi_ulong target_addr)
3782 {
3783     struct target_semid64_ds *target_sd;
3784 
3785     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3786         return -TARGET_EFAULT;
3787     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3788         return -TARGET_EFAULT;
3789     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3790     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3791     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3792     unlock_user_struct(target_sd, target_addr, 0);
3793     return 0;
3794 }
3795 
3796 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3797                                                struct semid_ds *host_sd)
3798 {
3799     struct target_semid64_ds *target_sd;
3800 
3801     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3802         return -TARGET_EFAULT;
3803     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3804         return -TARGET_EFAULT;
3805     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3806     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3807     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3808     unlock_user_struct(target_sd, target_addr, 1);
3809     return 0;
3810 }
3811 
/* Guest-layout mirror of the host's struct seminfo (semctl IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3824 
3825 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3826                                               struct seminfo *host_seminfo)
3827 {
3828     struct target_seminfo *target_seminfo;
3829     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3830         return -TARGET_EFAULT;
3831     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3832     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3833     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3834     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3835     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3836     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3837     __put_user(host_seminfo->semume, &target_seminfo->semume);
3838     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3839     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3840     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3841     unlock_user_struct(target_seminfo, target_addr, 1);
3842     return 0;
3843 }
3844 
/* Host-side semctl() argument union (not declared by glibc). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of the same union: all pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3858 
/* Read the guest's semaphore value array (GETALL/SETALL) into a freshly
 * allocated host array.  The semaphore count is obtained from the kernel
 * via IPC_STAT.  On success *host_array is owned by the caller and is
 * expected to be released by host_to_target_semarray(). */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores the set holds. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    /* Swap each 16-bit value into host byte order. */
    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3894 
3895 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3896                                                unsigned short **host_array)
3897 {
3898     int nsems;
3899     unsigned short *array;
3900     union semun semun;
3901     struct semid_ds semid_ds;
3902     int i, ret;
3903 
3904     semun.buf = &semid_ds;
3905 
3906     ret = semctl(semid, 0, IPC_STAT, semun);
3907     if (ret == -1)
3908         return get_errno(ret);
3909 
3910     nsems = semid_ds.sem_nsems;
3911 
3912     array = lock_user(VERIFY_WRITE, target_addr,
3913                       nsems*sizeof(unsigned short), 0);
3914     if (!array)
3915         return -TARGET_EFAULT;
3916 
3917     for(i=0; i<nsems; i++) {
3918         __put_user((*host_array)[i], &array[i]);
3919     }
3920     g_free(*host_array);
3921     unlock_user(array, target_addr, 1);
3922 
3923     return 0;
3924 }
3925 
/* Emulate semctl(2) for the guest: converts the per-command argument
 * union between guest and host layouts, calls the host semctl, and
 * converts results back.  Returns a target errno on failure. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* strip version bits (presumably TARGET_IPC_64 etc.) from the
     * command before dispatching — the host call takes the bare cmd */
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* target_to_host_semarray allocates 'array';
             * host_to_target_semarray releases it. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* these commands take no argument */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
3995 
/* Guest-layout mirror of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4001 
4002 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4003                                              abi_ulong target_addr,
4004                                              unsigned nsops)
4005 {
4006     struct target_sembuf *target_sembuf;
4007     int i;
4008 
4009     target_sembuf = lock_user(VERIFY_READ, target_addr,
4010                               nsops*sizeof(struct target_sembuf), 1);
4011     if (!target_sembuf)
4012         return -TARGET_EFAULT;
4013 
4014     for(i=0; i<nsops; i++) {
4015         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4016         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4017         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4018     }
4019 
4020     unlock_user(target_sembuf, target_addr, 0);
4021 
4022     return 0;
4023 }
4024 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop/semtimedop for the guest.  'timeout' is a guest address
 * of a timespec (0 means block indefinitely); 'time64' selects the
 * 64-bit timespec layout.  Falls back to the multiplexed ipc(2) syscall
 * on hosts without a dedicated semtimedop number. */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's limit on operations per call. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Fallback for hosts that only provide the ipc(2) multiplexer. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4087 
/* Guest-layout mirror of struct msqid64_ds (msgctl IPC_STAT/IPC_SET). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;
#if TARGET_ABI_BITS == 32
    /* padding after each time field on 32-bit ABIs */
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;
    abi_ulong msg_qnum;
    abi_ulong msg_qbytes;
    abi_ulong msg_lspid;
    abi_ulong msg_lrpid;
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4111 
4112 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4113                                                abi_ulong target_addr)
4114 {
4115     struct target_msqid_ds *target_md;
4116 
4117     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4118         return -TARGET_EFAULT;
4119     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4120         return -TARGET_EFAULT;
4121     host_md->msg_stime = tswapal(target_md->msg_stime);
4122     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4123     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4124     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4125     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4126     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4127     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4128     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4129     unlock_user_struct(target_md, target_addr, 0);
4130     return 0;
4131 }
4132 
4133 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4134                                                struct msqid_ds *host_md)
4135 {
4136     struct target_msqid_ds *target_md;
4137 
4138     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4139         return -TARGET_EFAULT;
4140     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4141         return -TARGET_EFAULT;
4142     target_md->msg_stime = tswapal(host_md->msg_stime);
4143     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4144     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4145     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4146     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4147     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4148     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4149     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4150     unlock_user_struct(target_md, target_addr, 1);
4151     return 0;
4152 }
4153 
/* Guest-layout mirror of struct msginfo (msgctl IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4164 
4165 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4166                                               struct msginfo *host_msginfo)
4167 {
4168     struct target_msginfo *target_msginfo;
4169     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4170         return -TARGET_EFAULT;
4171     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4172     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4173     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4174     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4175     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4176     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4177     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4178     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4179     unlock_user_struct(target_msginfo, target_addr, 1);
4180     return 0;
4181 }
4182 
/* Emulate msgctl(2) for the guest: converts the per-command argument
 * struct between guest and host layouts around the host call.
 * Returns a target errno on failure. */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* strip version bits (presumably TARGET_IPC_64 etc.) from the cmd */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* the kernel returns a struct msginfo through the msqid_ds
         * pointer for these commands, hence the cast */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4214 
/* Guest-layout msgbuf header; mtext is really variable length. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4219 
/* Emulate msgsnd(2): copy the guest message into a host msgbuf (swapping
 * mtype) and send it, falling back to the ipc(2) multiplexer on hosts
 * without a dedicated msgsnd syscall number. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): locked with copy=0 although mtype/mtext are read
     * below — verify this is correct under DEBUG_REMAP builds. */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding the text */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Fallback via the ipc(2) multiplexer; s390x takes one fewer arg. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4260 
#ifdef __NR_ipc
/* Helper to build the trailing ipc(2) arguments for IPCOP_msgrcv,
 * which differ per host architecture. */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4274 
/* Emulate msgrcv(2): receive into a host msgbuf, then copy mtext and a
 * byte-swapped mtype back to the guest.  Falls back to the ipc(2)
 * multiplexer on hosts without a dedicated msgrcv syscall number. */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) covers the host mtype field preceding the text */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    /* On success ret is the number of mtext bytes received. */
    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        /* Re-lock the mtext region at its real length so the copy-back
         * covers the full received message. */
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (lock failure returned early);
     * the check is harmless belt-and-braces. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4326 
4327 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4328                                                abi_ulong target_addr)
4329 {
4330     struct target_shmid_ds *target_sd;
4331 
4332     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4333         return -TARGET_EFAULT;
4334     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4335         return -TARGET_EFAULT;
4336     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4337     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4338     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4339     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4340     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4341     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4342     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4343     unlock_user_struct(target_sd, target_addr, 0);
4344     return 0;
4345 }
4346 
4347 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4348                                                struct shmid_ds *host_sd)
4349 {
4350     struct target_shmid_ds *target_sd;
4351 
4352     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4353         return -TARGET_EFAULT;
4354     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4355         return -TARGET_EFAULT;
4356     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4357     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4358     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4359     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4360     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4361     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4362     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4363     unlock_user_struct(target_sd, target_addr, 1);
4364     return 0;
4365 }
4366 
/* Guest-ABI layout of struct shminfo (shmctl IPC_INFO); all fields are
 * abi_ulong so the size follows the target's word width. */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4374 
4375 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4376                                               struct shminfo *host_shminfo)
4377 {
4378     struct target_shminfo *target_shminfo;
4379     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4380         return -TARGET_EFAULT;
4381     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4382     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4383     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4384     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4385     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4386     unlock_user_struct(target_shminfo, target_addr, 1);
4387     return 0;
4388 }
4389 
/* Guest-ABI layout of struct shm_info (shmctl SHM_INFO); mirrors the
 * kernel's struct shm_info with target-width unsigned longs. */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4398 
4399 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4400                                                struct shm_info *host_shm_info)
4401 {
4402     struct target_shm_info *target_shm_info;
4403     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4404         return -TARGET_EFAULT;
4405     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4406     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4407     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4408     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4409     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4410     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4411     unlock_user_struct(target_shm_info, target_addr, 1);
4412     return 0;
4413 }
4414 
4415 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4416 {
4417     struct shmid_ds dsarg;
4418     struct shminfo shminfo;
4419     struct shm_info shm_info;
4420     abi_long ret = -TARGET_EINVAL;
4421 
4422     cmd &= 0xff;
4423 
4424     switch(cmd) {
4425     case IPC_STAT:
4426     case IPC_SET:
4427     case SHM_STAT:
4428         if (target_to_host_shmid_ds(&dsarg, buf))
4429             return -TARGET_EFAULT;
4430         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4431         if (host_to_target_shmid_ds(buf, &dsarg))
4432             return -TARGET_EFAULT;
4433         break;
4434     case IPC_INFO:
4435         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4436         if (host_to_target_shminfo(buf, &shminfo))
4437             return -TARGET_EFAULT;
4438         break;
4439     case SHM_INFO:
4440         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4441         if (host_to_target_shm_info(buf, &shm_info))
4442             return -TARGET_EFAULT;
4443         break;
4444     case IPC_RMID:
4445     case SHM_LOCK:
4446     case SHM_UNLOCK:
4447         ret = get_errno(shmctl(shmid, cmd, NULL));
4448         break;
4449     }
4450 
4451     return ret;
4452 }
4453 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default implementation: the guest's shmat() alignment requirement is
 * one guest page.  cpu_env is unused here but kept for the targets that
 * override this function and need CPU state. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4473 
/*
 * Emulate shmat(2) for the guest: attach SysV shared memory segment shmid
 * at guest address shmaddr (or pick an address if shmaddr == 0).
 * Returns the guest attach address, or a -TARGET_* error value.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the guest's SHMLBA alignment: round down with SHM_RND,
     * otherwise reject misaligned addresses like the kernel does. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    /* The whole segment must fit inside the guest address space. */
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma reserved this range for us, so
             * replacing the placeholder mapping is intended. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        /* errno was set by shmat (or forced to ENOMEM above). */
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    /* Make the attached range visible to the guest page tracking. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Remember the region so do_shmdt() can clear the page flags later.
     * NOTE(review): if every slot is in use the attach still succeeds but
     * is not tracked — confirm whether that is acceptable. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4558 
4559 static inline abi_long do_shmdt(abi_ulong shmaddr)
4560 {
4561     int i;
4562     abi_long rv;
4563 
4564     /* shmdt pointers are always untagged */
4565 
4566     mmap_lock();
4567 
4568     for (i = 0; i < N_SHM_REGIONS; ++i) {
4569         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4570             shm_regions[i].in_use = false;
4571             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4572             break;
4573         }
4574     }
4575     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4576 
4577     mmap_unlock();
4578 
4579     return rv;
4580 }
4581 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/*
 * Demultiplex the classic sys_ipc() multiplexer syscall onto the
 * individual SysV IPC helpers.  do_ipc() must return target values and
 * target errnos.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    /* The high 16 bits of 'call' encode the IPC call version. */
    int version = call >> 16;
    abi_long ret = 0;

    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
        /*
         * The s390 sys_ipc variant has only five parameters instead of six
         * (as for default variant) and the only difference is the handling of
         * SEMTIMEDOP where on s390 the third parameter is used as a pointer
         * to a struct timespec where the generic variant uses fifth parameter.
         */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /*
         * The semun argument to semctl is passed by value, so dereference
         * the ptr argument.
         */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        if (version == 0) {
            /*
             * Old-style (version 0) msgrcv: ptr points at a kludge struct
             * bundling the message buffer pointer and the message type.
             */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *tmp;

            if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }

            ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                            tswapal(tmp->msgtyp), third);

            unlock_user_struct(tmp, ptr, 0);
        } else {
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        if (version == 1) {
            ret = -TARGET_EINVAL;
        } else {
            abi_ulong raddr = do_shmat(cpu_env, first, ptr, second);

            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4702 
/* kernel structure types definitions */

/* First pass over syscall_types.h: generate one STRUCT_<name> enum value
 * per kernel structure, plus STRUCT_MAX as the count. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: generate the thunk type-description array
 * struct_<name>_def[] for each structure; STRUCT_SPECIAL entries are
 * defined by hand elsewhere, so they expand to nothing here. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed scratch buffer used when converting ioctl payloads. */
#define MAX_STRUCT_SIZE 4096
4721 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handle the FS_IOC_FIEMAP ioctl: translate the guest's variable-length
 * fiemap request to the host, perform the ioctl, and translate the result
 * (header plus fm_mapped_extents extent records) back to the guest.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the guest-supplied extent count so outbufsz can't overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    /* Release the oversized buffer, if we had to allocate one. */
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4810 
4811 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4812                                 int fd, int cmd, abi_long arg)
4813 {
4814     const argtype *arg_type = ie->arg_type;
4815     int target_size;
4816     void *argptr;
4817     int ret;
4818     struct ifconf *host_ifconf;
4819     uint32_t outbufsz;
4820     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4821     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4822     int target_ifreq_size;
4823     int nb_ifreq;
4824     int free_buf = 0;
4825     int i;
4826     int target_ifc_len;
4827     abi_long target_ifc_buf;
4828     int host_ifc_len;
4829     char *host_ifc_buf;
4830 
4831     assert(arg_type[0] == TYPE_PTR);
4832     assert(ie->access == IOC_RW);
4833 
4834     arg_type++;
4835     target_size = thunk_type_size(arg_type, 0);
4836 
4837     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4838     if (!argptr)
4839         return -TARGET_EFAULT;
4840     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4841     unlock_user(argptr, arg, 0);
4842 
4843     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4844     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4845     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4846 
4847     if (target_ifc_buf != 0) {
4848         target_ifc_len = host_ifconf->ifc_len;
4849         nb_ifreq = target_ifc_len / target_ifreq_size;
4850         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4851 
4852         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4853         if (outbufsz > MAX_STRUCT_SIZE) {
4854             /*
4855              * We can't fit all the extents into the fixed size buffer.
4856              * Allocate one that is large enough and use it instead.
4857              */
4858             host_ifconf = malloc(outbufsz);
4859             if (!host_ifconf) {
4860                 return -TARGET_ENOMEM;
4861             }
4862             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4863             free_buf = 1;
4864         }
4865         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4866 
4867         host_ifconf->ifc_len = host_ifc_len;
4868     } else {
4869       host_ifc_buf = NULL;
4870     }
4871     host_ifconf->ifc_buf = host_ifc_buf;
4872 
4873     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4874     if (!is_error(ret)) {
4875 	/* convert host ifc_len to target ifc_len */
4876 
4877         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4878         target_ifc_len = nb_ifreq * target_ifreq_size;
4879         host_ifconf->ifc_len = target_ifc_len;
4880 
4881 	/* restore target ifc_buf */
4882 
4883         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4884 
4885 	/* copy struct ifconf to target user */
4886 
4887         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4888         if (!argptr)
4889             return -TARGET_EFAULT;
4890         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4891         unlock_user(argptr, arg, target_size);
4892 
4893         if (target_ifc_buf != 0) {
4894             /* copy ifreq[] to target user */
4895             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4896             for (i = 0; i < nb_ifreq ; i++) {
4897                 thunk_convert(argptr + i * target_ifreq_size,
4898                               host_ifc_buf + i * sizeof(struct ifreq),
4899                               ifreq_arg_type, THUNK_TARGET);
4900             }
4901             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4902         }
4903     }
4904 
4905     if (free_buf) {
4906         free(host_ifconf);
4907     }
4908 
4909     return ret;
4910 }
4911 
4912 #if defined(CONFIG_USBFS)
4913 #if HOST_LONG_BITS > 64
4914 #error USBDEVFS thunks do not support >64 bit hosts yet.
4915 #endif
/* Book-keeping for one in-flight USB request: pairs the host-side
 * usbdevfs_urb (submitted to the kernel) with the guest URB address and
 * the locked guest data buffer.  The address of host_urb doubles as the
 * unique handle handed to the kernel; do_ioctl_usbdevfs_reapurb recovers
 * the containing live_urb from it via offsetof(). */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
4922 
4923 static GHashTable *usbdevfs_urb_hashtable(void)
4924 {
4925     static GHashTable *urb_hashtable;
4926 
4927     if (!urb_hashtable) {
4928         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4929     }
4930     return urb_hashtable;
4931 }
4932 
/* Register a live URB; the urb pointer serves as both key (its leading
 * target_urb_adr field, via g_int64_hash) and value. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4938 
/* Find the live URB registered for a guest URB address, or NULL. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    return g_hash_table_lookup(usbdevfs_urb_hashtable(), &target_urb_adr);
}
4944 
/* Unregister a live URB (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4950 
/*
 * Handle USBDEVFS_REAPURB(NDELAY): reap one completed URB from the kernel,
 * copy the completed host URB (and its data buffer, via unlock_user) back
 * to the guest, write the guest URB address through *arg, and free the
 * live_urb bookkeeping allocated at submit time.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel returns a host pointer to the reaped usbdevfs_urb. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Recover the enclosing live_urb from the embedded host_urb pointer. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    /* A zero target address means this was not a URB we submitted.
     * NOTE(review): lurb is not freed here — presumably it isn't ours to
     * free; confirm this path can't leak. */
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* unlock_user copies the (possibly written) data buffer back to the
     * guest and releases the mapping taken at submit time. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5010 
5011 static abi_long
5012 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5013                              uint8_t *buf_temp __attribute__((unused)),
5014                              int fd, int cmd, abi_long arg)
5015 {
5016     struct live_urb *lurb;
5017 
5018     /* map target address back to host URB with metadata. */
5019     lurb = urb_hashtable_lookup(arg);
5020     if (!lurb) {
5021         return -TARGET_EFAULT;
5022     }
5023     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5024 }
5025 
/*
 * Handle USBDEVFS_SUBMITURB: build a host copy of the guest's URB, lock
 * the guest data buffer for the duration of the transfer, submit the URB
 * to the kernel, and register the bookkeeping so REAPURB/DISCARDURB can
 * find it later.  The locked buffer and the live_urb are released at reap
 * time (or immediately on submit failure).
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_malloc0(sizeof(struct live_urb));
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    /* Convert the guest usbdevfs_urb into lurb->host_urb. */
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest URB address (hash key) and guest buffer address. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submit failed: release the locked buffer (no copy-back) and
         * drop the bookkeeping. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Submitted: REAPURB will find lurb via the hash table and free it. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5086 #endif /* CONFIG_USBFS */
5087 
5088 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5089                             int cmd, abi_long arg)
5090 {
5091     void *argptr;
5092     struct dm_ioctl *host_dm;
5093     abi_long guest_data;
5094     uint32_t guest_data_size;
5095     int target_size;
5096     const argtype *arg_type = ie->arg_type;
5097     abi_long ret;
5098     void *big_buf = NULL;
5099     char *host_data;
5100 
5101     arg_type++;
5102     target_size = thunk_type_size(arg_type, 0);
5103     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5104     if (!argptr) {
5105         ret = -TARGET_EFAULT;
5106         goto out;
5107     }
5108     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5109     unlock_user(argptr, arg, 0);
5110 
5111     /* buf_temp is too small, so fetch things into a bigger buffer */
5112     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5113     memcpy(big_buf, buf_temp, target_size);
5114     buf_temp = big_buf;
5115     host_dm = big_buf;
5116 
5117     guest_data = arg + host_dm->data_start;
5118     if ((guest_data - arg) < 0) {
5119         ret = -TARGET_EINVAL;
5120         goto out;
5121     }
5122     guest_data_size = host_dm->data_size - host_dm->data_start;
5123     host_data = (char*)host_dm + host_dm->data_start;
5124 
5125     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5126     if (!argptr) {
5127         ret = -TARGET_EFAULT;
5128         goto out;
5129     }
5130 
5131     switch (ie->host_cmd) {
5132     case DM_REMOVE_ALL:
5133     case DM_LIST_DEVICES:
5134     case DM_DEV_CREATE:
5135     case DM_DEV_REMOVE:
5136     case DM_DEV_SUSPEND:
5137     case DM_DEV_STATUS:
5138     case DM_DEV_WAIT:
5139     case DM_TABLE_STATUS:
5140     case DM_TABLE_CLEAR:
5141     case DM_TABLE_DEPS:
5142     case DM_LIST_VERSIONS:
5143         /* no input data */
5144         break;
5145     case DM_DEV_RENAME:
5146     case DM_DEV_SET_GEOMETRY:
5147         /* data contains only strings */
5148         memcpy(host_data, argptr, guest_data_size);
5149         break;
5150     case DM_TARGET_MSG:
5151         memcpy(host_data, argptr, guest_data_size);
5152         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5153         break;
5154     case DM_TABLE_LOAD:
5155     {
5156         void *gspec = argptr;
5157         void *cur_data = host_data;
5158         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5159         int spec_size = thunk_type_size(arg_type, 0);
5160         int i;
5161 
5162         for (i = 0; i < host_dm->target_count; i++) {
5163             struct dm_target_spec *spec = cur_data;
5164             uint32_t next;
5165             int slen;
5166 
5167             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5168             slen = strlen((char*)gspec + spec_size) + 1;
5169             next = spec->next;
5170             spec->next = sizeof(*spec) + slen;
5171             strcpy((char*)&spec[1], gspec + spec_size);
5172             gspec += next;
5173             cur_data += spec->next;
5174         }
5175         break;
5176     }
5177     default:
5178         ret = -TARGET_EINVAL;
5179         unlock_user(argptr, guest_data, 0);
5180         goto out;
5181     }
5182     unlock_user(argptr, guest_data, 0);
5183 
5184     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5185     if (!is_error(ret)) {
5186         guest_data = arg + host_dm->data_start;
5187         guest_data_size = host_dm->data_size - host_dm->data_start;
5188         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5189         switch (ie->host_cmd) {
5190         case DM_REMOVE_ALL:
5191         case DM_DEV_CREATE:
5192         case DM_DEV_REMOVE:
5193         case DM_DEV_RENAME:
5194         case DM_DEV_SUSPEND:
5195         case DM_DEV_STATUS:
5196         case DM_TABLE_LOAD:
5197         case DM_TABLE_CLEAR:
5198         case DM_TARGET_MSG:
5199         case DM_DEV_SET_GEOMETRY:
5200             /* no return data */
5201             break;
5202         case DM_LIST_DEVICES:
5203         {
5204             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5205             uint32_t remaining_data = guest_data_size;
5206             void *cur_data = argptr;
5207             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5208             int nl_size = 12; /* can't use thunk_size due to alignment */
5209 
5210             while (1) {
5211                 uint32_t next = nl->next;
5212                 if (next) {
5213                     nl->next = nl_size + (strlen(nl->name) + 1);
5214                 }
5215                 if (remaining_data < nl->next) {
5216                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5217                     break;
5218                 }
5219                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5220                 strcpy(cur_data + nl_size, nl->name);
5221                 cur_data += nl->next;
5222                 remaining_data -= nl->next;
5223                 if (!next) {
5224                     break;
5225                 }
5226                 nl = (void*)nl + next;
5227             }
5228             break;
5229         }
5230         case DM_DEV_WAIT:
5231         case DM_TABLE_STATUS:
5232         {
5233             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5234             void *cur_data = argptr;
5235             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5236             int spec_size = thunk_type_size(arg_type, 0);
5237             int i;
5238 
5239             for (i = 0; i < host_dm->target_count; i++) {
5240                 uint32_t next = spec->next;
5241                 int slen = strlen((char*)&spec[1]) + 1;
5242                 spec->next = (cur_data - argptr) + spec_size + slen;
5243                 if (guest_data_size < spec->next) {
5244                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5245                     break;
5246                 }
5247                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5248                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5249                 cur_data = argptr + spec->next;
5250                 spec = (void*)host_dm + host_dm->data_start + next;
5251             }
5252             break;
5253         }
5254         case DM_TABLE_DEPS:
5255         {
5256             void *hdata = (void*)host_dm + host_dm->data_start;
5257             int count = *(uint32_t*)hdata;
5258             uint64_t *hdev = hdata + 8;
5259             uint64_t *gdev = argptr + 8;
5260             int i;
5261 
5262             *(uint32_t*)argptr = tswap32(count);
5263             for (i = 0; i < count; i++) {
5264                 *gdev = tswap64(*hdev);
5265                 gdev++;
5266                 hdev++;
5267             }
5268             break;
5269         }
5270         case DM_LIST_VERSIONS:
5271         {
5272             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5273             uint32_t remaining_data = guest_data_size;
5274             void *cur_data = argptr;
5275             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5276             int vers_size = thunk_type_size(arg_type, 0);
5277 
5278             while (1) {
5279                 uint32_t next = vers->next;
5280                 if (next) {
5281                     vers->next = vers_size + (strlen(vers->name) + 1);
5282                 }
5283                 if (remaining_data < vers->next) {
5284                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5285                     break;
5286                 }
5287                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5288                 strcpy(cur_data + vers_size, vers->name);
5289                 cur_data += vers->next;
5290                 remaining_data -= vers->next;
5291                 if (!next) {
5292                     break;
5293                 }
5294                 vers = (void*)vers + next;
5295             }
5296             break;
5297         }
5298         default:
5299             unlock_user(argptr, guest_data, 0);
5300             ret = -TARGET_EINVAL;
5301             goto out;
5302         }
5303         unlock_user(argptr, guest_data, guest_data_size);
5304 
5305         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5306         if (!argptr) {
5307             ret = -TARGET_EFAULT;
5308             goto out;
5309         }
5310         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5311         unlock_user(argptr, arg, target_size);
5312     }
5313 out:
5314     g_free(big_buf);
5315     return ret;
5316 }
5317 
/*
 * Emulate BLKPG: the argument is a struct blkpg_ioctl_arg whose ->data
 * member points at a separate struct blkpg_partition in guest memory,
 * so both levels must be fetched and converted independently.
 * Returns the (target) errno result of the host ioctl, or a negative
 * target errno on conversion failure.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    /* buf_temp holds the host-format outer struct after conversion */
    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;    /* skip TYPE_PTR; arg_type now describes the struct */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    /* host_blkpg->data still holds the *guest* pointer at this point */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5370 
/*
 * Emulate routing-table ioctls (e.g. SIOCADDRT) that take a struct rtentry.
 * The struct contains an embedded rt_dev string pointer, which the generic
 * thunk machinery cannot translate, so the struct is converted field by
 * field here and the rt_dev guest string is locked into host memory and
 * swizzled into the host struct before the ioctl.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This handler only supports write-only pointer-to-rtentry commands */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: translate guest string pointer into a locked
               host string pointer instead of thunk-converting it */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* the loop above must have seen the rt_dev field */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* release the rt_dev string locked above (no write-back needed) */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5436 
5437 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5438                                      int fd, int cmd, abi_long arg)
5439 {
5440     int sig = target_to_host_signal(arg);
5441     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5442 }
5443 
5444 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5445                                     int fd, int cmd, abi_long arg)
5446 {
5447     struct timeval tv;
5448     abi_long ret;
5449 
5450     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5451     if (is_error(ret)) {
5452         return ret;
5453     }
5454 
5455     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5456         if (copy_to_user_timeval(arg, &tv)) {
5457             return -TARGET_EFAULT;
5458         }
5459     } else {
5460         if (copy_to_user_timeval64(arg, &tv)) {
5461             return -TARGET_EFAULT;
5462         }
5463     }
5464 
5465     return ret;
5466 }
5467 
5468 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5469                                       int fd, int cmd, abi_long arg)
5470 {
5471     struct timespec ts;
5472     abi_long ret;
5473 
5474     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5475     if (is_error(ret)) {
5476         return ret;
5477     }
5478 
5479     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5480         if (host_to_target_timespec(arg, &ts)) {
5481             return -TARGET_EFAULT;
5482         }
5483     } else{
5484         if (host_to_target_timespec64(arg, &ts)) {
5485             return -TARGET_EFAULT;
5486         }
5487     }
5488 
5489     return ret;
5490 }
5491 
5492 #ifdef TIOCGPTPEER
5493 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5494                                      int fd, int cmd, abi_long arg)
5495 {
5496     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5497     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5498 }
5499 #endif
5500 
5501 #ifdef HAVE_DRM_H
5502 
5503 static void unlock_drm_version(struct drm_version *host_ver,
5504                                struct target_drm_version *target_ver,
5505                                bool copy)
5506 {
5507     unlock_user(host_ver->name, target_ver->name,
5508                                 copy ? host_ver->name_len : 0);
5509     unlock_user(host_ver->date, target_ver->date,
5510                                 copy ? host_ver->date_len : 0);
5511     unlock_user(host_ver->desc, target_ver->desc,
5512                                 copy ? host_ver->desc_len : 0);
5513 }
5514 
/*
 * Prepare a host struct drm_version for DRM_IOCTL_VERSION: copy the three
 * buffer lengths from the guest struct and lock the corresponding guest
 * buffers (name/date/desc) into host memory so the kernel can fill them.
 * On failure, any buffers locked so far are released via
 * unlock_drm_version(..., false).  Returns 0 or -EFAULT.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    /* zero-init so unlock_drm_version() can safely unlock unset buffers */
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            /* nothing locked yet, so no cleanup needed */
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5552 
/*
 * Copy the results of a successful DRM_IOCTL_VERSION back to the guest:
 * scalar fields via __put_user, then release the locked string buffers
 * with write-back enabled so the kernel-filled name/date/desc reach
 * guest memory.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5565 
5566 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5567                              int fd, int cmd, abi_long arg)
5568 {
5569     struct drm_version *ver;
5570     struct target_drm_version *target_ver;
5571     abi_long ret;
5572 
5573     switch (ie->host_cmd) {
5574     case DRM_IOCTL_VERSION:
5575         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5576             return -TARGET_EFAULT;
5577         }
5578         ver = (struct drm_version *)buf_temp;
5579         ret = target_to_host_drmversion(ver, target_ver);
5580         if (!is_error(ret)) {
5581             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5582             if (is_error(ret)) {
5583                 unlock_drm_version(ver, target_ver, false);
5584             } else {
5585                 host_to_target_drmversion(target_ver, ver);
5586             }
5587         }
5588         unlock_user_struct(target_ver, arg, 0);
5589         return ret;
5590     }
5591     return -TARGET_ENOSYS;
5592 }
5593 
/*
 * DRM_IOCTL_I915_GETPARAM: the struct contains a pointer to an int result,
 * so the guest struct cannot be thunk-converted directly.  The param is
 * copied in, the value pointer is redirected to a host-local int, and the
 * result is written back to the guest's value pointer afterwards.
 */
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    /*
     * NOTE(review): target_gparam->value is used as a guest address here
     * without an explicit byte swap, and the put_user_s32() result is
     * ignored — confirm against the target_drm_i915_getparam definition
     * whether lock_user_struct already handles the swap.
     */
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
5614 
5615 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5616                                   int fd, int cmd, abi_long arg)
5617 {
5618     switch (ie->host_cmd) {
5619     case DRM_IOCTL_I915_GETPARAM:
5620         return do_ioctl_drm_i915_getparam(ie,
5621                                           (struct drm_i915_getparam *)buf_temp,
5622                                           fd, arg);
5623     default:
5624         return -TARGET_ENOSYS;
5625     }
5626 }
5627 
5628 #endif
5629 
/*
 * TUNSETTXFILTER: the argument is a variable-length struct tun_filter
 * (fixed header plus filter->count MAC addresses), so the fixed part and
 * the trailing address array are fetched from the guest separately.
 * The assembled host struct must fit in buf_temp (MAX_STRUCT_SIZE).
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    /* header fields are 16-bit and need byte-swapping */
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* reject filters whose address array would overflow buf_temp */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        /* MAC addresses are raw bytes: no byte-swapping required */
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5665 
/*
 * Table of all emulated ioctls, expanded from ioctls.h via the macros
 * below.  Each entry maps a target command number to the host command,
 * access mode, optional custom handler and argument-type description.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    /* sentinel: do_ioctl() stops scanning when target_cmd == 0 */
    { 0, 0, },
};
5676 
5677 /* ??? Implement proper locking for ioctls.  */
5678 /* do_ioctl() Must return target values and target errnos. */
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Generic ioctl dispatcher: look the target command up in ioctl_entries,
 * defer to a per-command handler if one is registered, otherwise convert
 * the argument generically according to its thunk type description and
 * access mode (IOC_R / IOC_W / IOC_RW) and issue the host ioctl.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* linear scan of the table, terminated by the target_cmd == 0 entry */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOSYS;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* command has a custom handler (registered via IOCTL_SPECIAL) */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOSYS;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* scalar argument: pass through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only for the guest: run ioctl into buf_temp, then
               convert the result out to guest memory on success */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only for the guest: convert argument in, run ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* read-write: convert in, run ioctl, convert result back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
5768 
/* Guest <-> host bit mapping for the termios input-mode flags (c_iflag). */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5787 
/* Guest <-> host bit mapping for the termios output-mode flags (c_oflag). */
static const bitmask_transtbl oflag_tbl[] = {
	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
	{ 0, 0, 0, 0 }
};
5815 
/* Guest <-> host bit mapping for the termios control-mode flags (c_cflag),
   including the CBAUD baud-rate field and CSIZE character-size field. */
static const bitmask_transtbl cflag_tbl[] = {
	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
	{ 0, 0, 0, 0 }
};
5850 
/* Guest <-> host bit mapping for the termios local-mode flags (c_lflag). */
static const bitmask_transtbl lflag_tbl[] = {
  { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
  { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
  { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
  { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
  { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
  { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
  { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
  { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
  { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
  { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
  { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
  { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
  { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
  { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
  { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
  { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
  { 0, 0, 0, 0 }
};
5870 
/*
 * Convert a guest struct termios (in guest byte order) to the host layout:
 * flag words are byte-swapped and remapped bit-by-bit through the tables
 * above, and the control characters are copied slot by slot since the
 * V* indices differ between targets.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* zero first: host slots with no target equivalent stay cleared */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5905 
/*
 * Convert a host struct termios to the guest layout (guest byte order):
 * the inverse of target_to_host_termios() — flag words are remapped
 * through the tables and byte-swapped, control characters copied slot
 * by slot under the differing V* indices.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* zero first: target slots with no host equivalent stay cleared */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5940 
/*
 * Thunk description for struct termios.  Conversion goes through the
 * custom routines above (rather than field-by-field thunking) because
 * flag bit values and c_cc indices differ between guest and host.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5947 
/* Guest <-> host mapping for mmap(2) flag bits. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
5970 
5971 /*
5972  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5973  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5974  */
5975 #if defined(TARGET_I386)
5976 
5977 /* NOTE: there is really one LDT for all the threads */
5978 static uint8_t *ldt_table;
5979 
5980 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5981 {
5982     int size;
5983     void *p;
5984 
5985     if (!ldt_table)
5986         return 0;
5987     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5988     if (size > bytecount)
5989         size = bytecount;
5990     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5991     if (!p)
5992         return -TARGET_EFAULT;
5993     /* ??? Should this by byteswapped?  */
5994     memcpy(p, ldt_table, size);
5995     unlock_user(p, ptr, size);
5996     return size;
5997 }
5998 
/*
 * Implement the "write" side of modify_ldt(2): validate the guest-supplied
 * user_desc, allocate the process-wide LDT on first use, encode the
 * descriptor exactly like the Linux kernel does, and install it.
 * XXX: add locking support -- concurrent writers currently race.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* The ABI only accepts a complete user_desc structure. */
    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    /* Copy the guest structure in, byte-swapping each field. */
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel's). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    /* Long-mode bit is only meaningful for 64-bit guests. */
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        /* contents == 3 is rejected in old mode and must be not-present. */
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        /* NOTE: there is really one LDT shared by all threads. */
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes: two consecutive 32-bit words. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6090 
6091 /* specific and weird i386 syscalls */
6092 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6093                               unsigned long bytecount)
6094 {
6095     abi_long ret;
6096 
6097     switch (func) {
6098     case 0:
6099         ret = read_ldt(ptr, bytecount);
6100         break;
6101     case 1:
6102         ret = write_ldt(env, ptr, bytecount, 1);
6103         break;
6104     case 0x11:
6105         ret = write_ldt(env, ptr, bytecount, 0);
6106         break;
6107     default:
6108         ret = -TARGET_ENOSYS;
6109         break;
6110     }
6111     return ret;
6112 }
6113 
6114 #if defined(TARGET_ABI32)
/*
 * Implement set_thread_area(2) for 32-bit x86 guests: install a TLS
 * descriptor into one of the GDT TLS slots.  If the guest passes
 * entry_number == -1, the first free slot is chosen and written back
 * into the guest's user_desc.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    /* VERIFY_WRITE: we may write the chosen entry number back below. */
    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Auto-select: scan the TLS range for an empty GDT slot. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Only the dedicated TLS entries may be set this way. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same layout as the kernel's). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of an x86 descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6199 
/*
 * Implement get_thread_area(2): read back the TLS descriptor at the
 * requested GDT slot and re-encode it as a user_desc for the guest.
 * This is the inverse of the packing done in do_set_thread_area().
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the dedicated TLS entries may be queried this way. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Read the two 32-bit halves of the descriptor. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Unpack the descriptor bits back into user_desc flag fields. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6246 
/* arch_prctl(2) is not available to 32-bit (TARGET_ABI32) guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6251 #else
6252 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6253 {
6254     abi_long ret = 0;
6255     abi_ulong val;
6256     int idx;
6257 
6258     switch(code) {
6259     case TARGET_ARCH_SET_GS:
6260     case TARGET_ARCH_SET_FS:
6261         if (code == TARGET_ARCH_SET_GS)
6262             idx = R_GS;
6263         else
6264             idx = R_FS;
6265         cpu_x86_load_seg(env, idx, 0);
6266         env->segs[idx].base = addr;
6267         break;
6268     case TARGET_ARCH_GET_GS:
6269     case TARGET_ARCH_GET_FS:
6270         if (code == TARGET_ARCH_GET_GS)
6271             idx = R_GS;
6272         else
6273             idx = R_FS;
6274         val = env->segs[idx].base;
6275         if (put_user(val, addr, abi_ulong))
6276             ret = -TARGET_EFAULT;
6277         break;
6278     default:
6279         ret = -TARGET_EINVAL;
6280         break;
6281     }
6282     return ret;
6283 }
#endif /* defined(TARGET_ABI32) */
6285 
6286 #endif /* defined(TARGET_I386) */
6287 
6288 #define NEW_STACK_SIZE 0x40000
6289 
6290 
/* Serializes thread creation; also holds new threads until the parent
 * has finished initializing them (see do_fork()/clone_func()). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parent/child handshake data for a CLONE_VM (thread-style) clone. */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects the 'cond' handshake */
    pthread_cond_t cond;        /* child signals "ready" on this */
    pthread_t thread;
    uint32_t tid;               /* filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* parent's signal mask, restored in child */
} new_thread_info;
6302 
/*
 * Start routine for a new guest thread created via clone() with CLONE_VM.
 * Runs in the child pthread: registers with RCU/TCG, publishes its TID,
 * restores the signal mask, signals readiness to the parent, waits for
 * the parent to finish TLS setup, then enters the guest CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6336 
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions).
   Implements the guest clone()/fork() family: with CLONE_VM a new host
   thread sharing the address space is created; without it the whole
   QEMU process is fork()ed to emulate a guest fork(). */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    /* Drop flags we deliberately ignore rather than rejecting them. */
    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = (TaskState *)cpu->opaque;
        new_thread_info info;
        pthread_attr_t attr;

        /* Thread-style clones must use exactly the supported flag set. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!(cpu->tcg_cflags & CF_PARALLEL)) {
            cpu->tcg_cflags |= CF_PARALLEL;
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        /* Set up the parent/child handshake (see clone_func()). */
        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /* NOTE(review): the attr setup return codes are ignored; 'ret' is
           overwritten below by pthread_create(). */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        /* Restore our signal mask now the child has its own copy. */
        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(1);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = (TaskState *)cpu->opaque;
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            cpu_clone_regs_parent(env, flags);
            fork_end(0);
        }
    }
    return ret;
}
6481 
/*
 * Translate a guest fcntl(2) command number to the host's.
 * Warning: doesn't handle Linux-specific flags...
 * Returns -TARGET_EINVAL for commands we do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These commands have the same number on all our targets and hosts. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    /* Map the plain lock commands to the 64-bit host variants, since we
       always use struct flock64 internally (see do_fcntl()). */
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* 32-bit guests additionally expose explicit 64-bit lock commands. */
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6588 
6589 #define FLOCK_TRANSTBL \
6590     switch (type) { \
6591     TRANSTBL_CONVERT(F_RDLCK); \
6592     TRANSTBL_CONVERT(F_WRLCK); \
6593     TRANSTBL_CONVERT(F_UNLCK); \
6594     }
6595 
/* Translate a guest flock type (F_RDLCK/F_WRLCK/F_UNLCK) to the host
 * value; unknown types are rejected with -TARGET_EINVAL. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6603 
/* Translate a host flock type back to the guest's encoding. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6614 
6615 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6616                                             abi_ulong target_flock_addr)
6617 {
6618     struct target_flock *target_fl;
6619     int l_type;
6620 
6621     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6622         return -TARGET_EFAULT;
6623     }
6624 
6625     __get_user(l_type, &target_fl->l_type);
6626     l_type = target_to_host_flock(l_type);
6627     if (l_type < 0) {
6628         return l_type;
6629     }
6630     fl->l_type = l_type;
6631     __get_user(fl->l_whence, &target_fl->l_whence);
6632     __get_user(fl->l_start, &target_fl->l_start);
6633     __get_user(fl->l_len, &target_fl->l_len);
6634     __get_user(fl->l_pid, &target_fl->l_pid);
6635     unlock_user_struct(target_fl, target_flock_addr, 0);
6636     return 0;
6637 }
6638 
6639 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6640                                           const struct flock64 *fl)
6641 {
6642     struct target_flock *target_fl;
6643     short l_type;
6644 
6645     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6646         return -TARGET_EFAULT;
6647     }
6648 
6649     l_type = host_to_target_flock(fl->l_type);
6650     __put_user(l_type, &target_fl->l_type);
6651     __put_user(fl->l_whence, &target_fl->l_whence);
6652     __put_user(fl->l_start, &target_fl->l_start);
6653     __put_user(fl->l_len, &target_fl->l_len);
6654     __put_user(fl->l_pid, &target_fl->l_pid);
6655     unlock_user_struct(target_fl, target_flock_addr, 1);
6656     return 0;
6657 }
6658 
6659 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6660 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6661 
6662 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6663 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6664                                                    abi_ulong target_flock_addr)
6665 {
6666     struct target_oabi_flock64 *target_fl;
6667     int l_type;
6668 
6669     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6670         return -TARGET_EFAULT;
6671     }
6672 
6673     __get_user(l_type, &target_fl->l_type);
6674     l_type = target_to_host_flock(l_type);
6675     if (l_type < 0) {
6676         return l_type;
6677     }
6678     fl->l_type = l_type;
6679     __get_user(fl->l_whence, &target_fl->l_whence);
6680     __get_user(fl->l_start, &target_fl->l_start);
6681     __get_user(fl->l_len, &target_fl->l_len);
6682     __get_user(fl->l_pid, &target_fl->l_pid);
6683     unlock_user_struct(target_fl, target_flock_addr, 0);
6684     return 0;
6685 }
6686 
6687 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6688                                                  const struct flock64 *fl)
6689 {
6690     struct target_oabi_flock64 *target_fl;
6691     short l_type;
6692 
6693     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6694         return -TARGET_EFAULT;
6695     }
6696 
6697     l_type = host_to_target_flock(fl->l_type);
6698     __put_user(l_type, &target_fl->l_type);
6699     __put_user(fl->l_whence, &target_fl->l_whence);
6700     __put_user(fl->l_start, &target_fl->l_start);
6701     __put_user(fl->l_len, &target_fl->l_len);
6702     __put_user(fl->l_pid, &target_fl->l_pid);
6703     unlock_user_struct(target_fl, target_flock_addr, 1);
6704     return 0;
6705 }
6706 #endif
6707 
6708 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6709                                               abi_ulong target_flock_addr)
6710 {
6711     struct target_flock64 *target_fl;
6712     int l_type;
6713 
6714     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6715         return -TARGET_EFAULT;
6716     }
6717 
6718     __get_user(l_type, &target_fl->l_type);
6719     l_type = target_to_host_flock(l_type);
6720     if (l_type < 0) {
6721         return l_type;
6722     }
6723     fl->l_type = l_type;
6724     __get_user(fl->l_whence, &target_fl->l_whence);
6725     __get_user(fl->l_start, &target_fl->l_start);
6726     __get_user(fl->l_len, &target_fl->l_len);
6727     __get_user(fl->l_pid, &target_fl->l_pid);
6728     unlock_user_struct(target_fl, target_flock_addr, 0);
6729     return 0;
6730 }
6731 
6732 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6733                                             const struct flock64 *fl)
6734 {
6735     struct target_flock64 *target_fl;
6736     short l_type;
6737 
6738     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6739         return -TARGET_EFAULT;
6740     }
6741 
6742     l_type = host_to_target_flock(fl->l_type);
6743     __put_user(l_type, &target_fl->l_type);
6744     __put_user(fl->l_whence, &target_fl->l_whence);
6745     __put_user(fl->l_start, &target_fl->l_start);
6746     __put_user(fl->l_len, &target_fl->l_len);
6747     __put_user(fl->l_pid, &target_fl->l_pid);
6748     unlock_user_struct(target_fl, target_flock_addr, 1);
6749     return 0;
6750 }
6751 
/*
 * Implement fcntl(2) for the guest: translate the command number and any
 * flock / f_owner_ex structures between guest and host layouts, then
 * forward to the host fcntl.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    /* Lock queries: copy the guest flock in, call, copy the result back. */
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* File status flags need bitmask translation in both directions. */
    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Signal numbers must be translated between guest and host. */
    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    /* Plain integer argument: pass through unchanged. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
6870 
6871 #ifdef USE_UID16
6872 
/* Narrow a 32-bit uid for the 16-bit uid ABI: values that do not fit in
 * 16 bits are reported as the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
6880 
/* Narrow a 32-bit gid for the 16-bit gid ABI: values that do not fit in
 * 16 bits are reported as the overflow id 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
6888 
/* Widen a 16-bit uid: the 16-bit value -1 (0xffff) is the "no change"
 * sentinel and must widen to the 32-bit -1; everything else passes through. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
6896 
/* Widen a 16-bit gid: the 16-bit value -1 (0xffff) is the "no change"
 * sentinel and must widen to the 32-bit -1; everything else passes through. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Byte-swap a 16-bit guest uid/gid value if host and guest endianness differ. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
6908 
6909 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
6910 
6911 #else /* !USE_UID16 */
/* Without USE_UID16 the guest ABI uses full 32-bit IDs, so the
 * narrowing/widening helpers are identity functions; they exist so the
 * syscall code can be written once for both configurations. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit guest uid/gid value if endianness differs. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
6932 
6933 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
6934 
6935 #endif /* USE_UID16 */
6936 
6937 /* We must do direct syscalls for setting UID/GID, because we want to
6938  * implement the Linux system call semantics of "change only for this thread",
6939  * not the libc/POSIX semantics of "change for all threads in process".
6940  * (See http://ewontfix.com/17/ for more details.)
6941  * We use the 32-bit version of the syscalls if present; if it is not
6942  * then either the host architecture supports 32-bit UIDs natively with
6943  * the standard syscall, or the 16-bit UID is the best we can do.
6944  */
6945 #ifdef __NR_setuid32
6946 #define __NR_sys_setuid __NR_setuid32
6947 #else
6948 #define __NR_sys_setuid __NR_setuid
6949 #endif
6950 #ifdef __NR_setgid32
6951 #define __NR_sys_setgid __NR_setgid32
6952 #else
6953 #define __NR_sys_setgid __NR_setgid
6954 #endif
6955 #ifdef __NR_setresuid32
6956 #define __NR_sys_setresuid __NR_setresuid32
6957 #else
6958 #define __NR_sys_setresuid __NR_setresuid
6959 #endif
6960 #ifdef __NR_setresgid32
6961 #define __NR_sys_setresgid __NR_setresgid32
6962 #else
6963 #define __NR_sys_setresgid __NR_setresgid
6964 #endif
6965 
6966 _syscall1(int, sys_setuid, uid_t, uid)
6967 _syscall1(int, sys_setgid, gid_t, gid)
6968 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
6969 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
6970 
/*
 * One-time initialisation of the syscall emulation layer.
 *
 * Registers every structure layout from "syscall_types.h" with the thunk
 * conversion machinery, then walks the ioctl table: entries whose size
 * field is the all-ones sentinel get the real (target) payload size
 * computed from their argument type and patched into the command number.
 * When host and target are the same architecture/ABI, it also warns about
 * any entry whose patched target command differs from the host command.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* The sentinel only makes sense for pointer payloads whose
               pointed-to size the thunk layer can compute. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7015 
7016 #ifdef TARGET_NR_truncate64
7017 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7018                                          abi_long arg2,
7019                                          abi_long arg3,
7020                                          abi_long arg4)
7021 {
7022     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7023         arg2 = arg3;
7024         arg3 = arg4;
7025     }
7026     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7027 }
7028 #endif
7029 
7030 #ifdef TARGET_NR_ftruncate64
7031 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7032                                           abi_long arg2,
7033                                           abi_long arg3,
7034                                           abi_long arg4)
7035 {
7036     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7037         arg2 = arg3;
7038         arg3 = arg4;
7039     }
7040     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7041 }
7042 #endif
7043 
7044 #if defined(TARGET_NR_timer_settime) || \
7045     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7046 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7047                                                  abi_ulong target_addr)
7048 {
7049     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7050                                 offsetof(struct target_itimerspec,
7051                                          it_interval)) ||
7052         target_to_host_timespec(&host_its->it_value, target_addr +
7053                                 offsetof(struct target_itimerspec,
7054                                          it_value))) {
7055         return -TARGET_EFAULT;
7056     }
7057 
7058     return 0;
7059 }
7060 #endif
7061 
7062 #if defined(TARGET_NR_timer_settime64) || \
7063     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7064 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7065                                                    abi_ulong target_addr)
7066 {
7067     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7068                                   offsetof(struct target__kernel_itimerspec,
7069                                            it_interval)) ||
7070         target_to_host_timespec64(&host_its->it_value, target_addr +
7071                                   offsetof(struct target__kernel_itimerspec,
7072                                            it_value))) {
7073         return -TARGET_EFAULT;
7074     }
7075 
7076     return 0;
7077 }
7078 #endif
7079 
7080 #if ((defined(TARGET_NR_timerfd_gettime) || \
7081       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7082       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7083 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7084                                                  struct itimerspec *host_its)
7085 {
7086     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7087                                                        it_interval),
7088                                 &host_its->it_interval) ||
7089         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7090                                                        it_value),
7091                                 &host_its->it_value)) {
7092         return -TARGET_EFAULT;
7093     }
7094     return 0;
7095 }
7096 #endif
7097 
7098 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7099       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7100       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7101 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7102                                                    struct itimerspec *host_its)
7103 {
7104     if (host_to_target_timespec64(target_addr +
7105                                   offsetof(struct target__kernel_itimerspec,
7106                                            it_interval),
7107                                   &host_its->it_interval) ||
7108         host_to_target_timespec64(target_addr +
7109                                   offsetof(struct target__kernel_itimerspec,
7110                                            it_value),
7111                                   &host_its->it_value)) {
7112         return -TARGET_EFAULT;
7113     }
7114     return 0;
7115 }
7116 #endif
7117 
7118 #if defined(TARGET_NR_adjtimex) || \
7119     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a struct target_timex from guest memory at @target_addr and
 * convert it field-by-field (with byteswapping) into the host *host_tx.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unreadable.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    /* Third argument 0: nothing to copy back for a read-only mapping. */
    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7154 
/*
 * Write the host *host_tx out to a struct target_timex in guest memory
 * at @target_addr, byteswapping each field.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unwritable.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    /* Third argument 1: copy the converted struct back to guest memory. */
    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7189 #endif
7190 
7191 
7192 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a struct target__kernel_timex from guest memory at @target_addr
 * and convert it into the host *host_tx.  The 'time' member is fetched
 * separately with copy_from_user_timeval64() because it uses 64-bit
 * fields in the _kernel_ layout; the rest is a field-by-field byteswap.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unreadable.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7231 
7232 static inline abi_long host_to_target_timex64(abi_long target_addr,
7233                                               struct timex *host_tx)
7234 {
7235     struct target__kernel_timex *target_tx;
7236 
7237    if (copy_to_user_timeval64(target_addr +
7238                               offsetof(struct target__kernel_timex, time),
7239                               &host_tx->time)) {
7240         return -TARGET_EFAULT;
7241     }
7242 
7243     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7244         return -TARGET_EFAULT;
7245     }
7246 
7247     __put_user(host_tx->modes, &target_tx->modes);
7248     __put_user(host_tx->offset, &target_tx->offset);
7249     __put_user(host_tx->freq, &target_tx->freq);
7250     __put_user(host_tx->maxerror, &target_tx->maxerror);
7251     __put_user(host_tx->esterror, &target_tx->esterror);
7252     __put_user(host_tx->status, &target_tx->status);
7253     __put_user(host_tx->constant, &target_tx->constant);
7254     __put_user(host_tx->precision, &target_tx->precision);
7255     __put_user(host_tx->tolerance, &target_tx->tolerance);
7256     __put_user(host_tx->tick, &target_tx->tick);
7257     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7258     __put_user(host_tx->jitter, &target_tx->jitter);
7259     __put_user(host_tx->shift, &target_tx->shift);
7260     __put_user(host_tx->stabil, &target_tx->stabil);
7261     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7262     __put_user(host_tx->calcnt, &target_tx->calcnt);
7263     __put_user(host_tx->errcnt, &target_tx->errcnt);
7264     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7265     __put_user(host_tx->tai, &target_tx->tai);
7266 
7267     unlock_user_struct(target_tx, target_addr, 1);
7268     return 0;
7269 }
7270 #endif
7271 
7272 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7273 #define sigev_notify_thread_id _sigev_un._tid
7274 #endif
7275 
/*
 * Read a struct target_sigevent from guest memory at @target_addr and
 * convert it into the host *host_sevp.  Only the fields used by the
 * timer/aio paths (value, signo, notify, notify thread id) are filled in.
 * NOTE(review): *host_sevp is not zeroed here; callers appear to be
 * responsible for memset()ing it first — confirm at the call sites.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7301 
7302 #if defined(TARGET_NR_mlockall)
7303 static inline int target_to_host_mlockall_arg(int arg)
7304 {
7305     int result = 0;
7306 
7307     if (arg & TARGET_MCL_CURRENT) {
7308         result |= MCL_CURRENT;
7309     }
7310     if (arg & TARGET_MCL_FUTURE) {
7311         result |= MCL_FUTURE;
7312     }
7313 #ifdef MCL_ONFAULT
7314     if (arg & TARGET_MCL_ONFAULT) {
7315         result |= MCL_ONFAULT;
7316     }
7317 #endif
7318 
7319     return result;
7320 }
7321 #endif
7322 
7323 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7324      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7325      defined(TARGET_NR_newfstatat))
/*
 * Write the host struct stat *host_st out to the guest's 64-bit stat
 * buffer at @target_addr.  Two guest layouts are handled: the ARM EABI
 * variant (selected at runtime via the CPU state) and the generic
 * target_stat64/target_stat layout.  Nanosecond timestamps are copied
 * only when the host's struct stat provides st_atim et al.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_stat64(void *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (((CPUARMState *)cpu_env)->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets also expose a second, truncated inode field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7398 #endif
7399 
7400 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result out to the guest buffer at @target_addr,
 * byteswapping each field.  NOTE(review): @host_stx is declared as
 * struct target_statx but holds host-order values filled in by the
 * caller from the host statx(2) result — confirm at the call site.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so spare/padding fields reach the guest as zeroes. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7439 #endif
7440 
/*
 * Invoke the host futex syscall, selecting between __NR_futex and
 * __NR_futex_time64 depending on the host's word size and the size of
 * the host timespec's tv_sec (32-bit hosts may need the _time64 entry
 * point to pass a 64-bit time_t).  Unreachable only when neither
 * syscall number is defined for this host.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7465 
/*
 * Signal-safe counterpart of do_sys_futex(): same syscall selection
 * logic, but via the safe_futex wrappers (restartable around guest
 * signals) and with the result converted to a target errno.
 * Returns -TARGET_ENOSYS when no host futex syscall is available.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7490 
7491 /* ??? Using host futex calls even when target atomic operations
7492    are not really atomic probably breaks things.  However implementing
7493    futexes locally would make futexes shared between multiple processes
7494    tricky.  However they're probably useless because guest atomic
7495    operations won't work either.  */
7496 #if defined(TARGET_NR_futex)
7497 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7498                     target_ulong timeout, target_ulong uaddr2, int val3)
7499 {
7500     struct timespec ts, *pts;
7501     int base_op;
7502 
7503     /* ??? We assume FUTEX_* constants are the same on both host
7504        and target.  */
7505 #ifdef FUTEX_CMD_MASK
7506     base_op = op & FUTEX_CMD_MASK;
7507 #else
7508     base_op = op;
7509 #endif
7510     switch (base_op) {
7511     case FUTEX_WAIT:
7512     case FUTEX_WAIT_BITSET:
7513         if (timeout) {
7514             pts = &ts;
7515             target_to_host_timespec(pts, timeout);
7516         } else {
7517             pts = NULL;
7518         }
7519         return do_safe_futex(g2h(cpu, uaddr),
7520                              op, tswap32(val), pts, NULL, val3);
7521     case FUTEX_WAKE:
7522         return do_safe_futex(g2h(cpu, uaddr),
7523                              op, val, NULL, NULL, 0);
7524     case FUTEX_FD:
7525         return do_safe_futex(g2h(cpu, uaddr),
7526                              op, val, NULL, NULL, 0);
7527     case FUTEX_REQUEUE:
7528     case FUTEX_CMP_REQUEUE:
7529     case FUTEX_WAKE_OP:
7530         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7531            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7532            But the prototype takes a `struct timespec *'; insert casts
7533            to satisfy the compiler.  We do not need to tswap TIMEOUT
7534            since it's not compared to guest memory.  */
7535         pts = (struct timespec *)(uintptr_t) timeout;
7536         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7537                              (base_op == FUTEX_CMP_REQUEUE
7538                               ? tswap32(val3) : val3));
7539     default:
7540         return -TARGET_ENOSYS;
7541     }
7542 }
7543 #endif
7544 
7545 #if defined(TARGET_NR_futex_time64)
7546 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7547                            int val, target_ulong timeout,
7548                            target_ulong uaddr2, int val3)
7549 {
7550     struct timespec ts, *pts;
7551     int base_op;
7552 
7553     /* ??? We assume FUTEX_* constants are the same on both host
7554        and target.  */
7555 #ifdef FUTEX_CMD_MASK
7556     base_op = op & FUTEX_CMD_MASK;
7557 #else
7558     base_op = op;
7559 #endif
7560     switch (base_op) {
7561     case FUTEX_WAIT:
7562     case FUTEX_WAIT_BITSET:
7563         if (timeout) {
7564             pts = &ts;
7565             if (target_to_host_timespec64(pts, timeout)) {
7566                 return -TARGET_EFAULT;
7567             }
7568         } else {
7569             pts = NULL;
7570         }
7571         return do_safe_futex(g2h(cpu, uaddr), op,
7572                              tswap32(val), pts, NULL, val3);
7573     case FUTEX_WAKE:
7574         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7575     case FUTEX_FD:
7576         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7577     case FUTEX_REQUEUE:
7578     case FUTEX_CMP_REQUEUE:
7579     case FUTEX_WAKE_OP:
7580         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7581            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7582            But the prototype takes a `struct timespec *'; insert casts
7583            to satisfy the compiler.  We do not need to tswap TIMEOUT
7584            since it's not compared to guest memory.  */
7585         pts = (struct timespec *)(uintptr_t) timeout;
7586         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7587                              (base_op == FUTEX_CMP_REQUEUE
7588                               ? tswap32(val3) : val3));
7589     default:
7590         return -TARGET_ENOSYS;
7591     }
7592 }
7593 #endif
7594 
7595 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2): read the guest's file_handle buffer size,
 * call the host syscall into a host-allocated handle of the same size,
 * copy the (opaque) result back to the guest and store the mount id.
 * Returns the host syscall result or -TARGET_EFAULT on bad guest memory.
 * NOTE(review): 'size' is guest-controlled and unvalidated before
 * computing total_size and allocating — confirm the kernel's
 * handle_bytes bound (MAX_HANDLE_SZ) makes overflow/huge allocation
 * impossible here.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    /* Copy raw, then byteswap the two non-opaque header fields. */
    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7647 #endif
7648 
7649 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest's file_handle (opaque
 * payload plus byteswapped header fields) into a host-side handle and
 * invoke the host syscall with converted open flags.
 * Returns the new fd or a target errno; -TARGET_EFAULT on bad guest
 * memory.  NOTE(review): as in do_name_to_handle_at(), 'size' is
 * guest-controlled and unvalidated before the total_size computation —
 * confirm the kernel bound makes this safe.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the opaque payload, then fix up the header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7681 #endif
7682 
7683 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7684 
7685 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7686 {
7687     int host_flags;
7688     target_sigset_t *target_mask;
7689     sigset_t host_mask;
7690     abi_long ret;
7691 
7692     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7693         return -TARGET_EINVAL;
7694     }
7695     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7696         return -TARGET_EFAULT;
7697     }
7698 
7699     target_to_host_sigset(&host_mask, target_mask);
7700 
7701     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7702 
7703     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7704     if (ret >= 0) {
7705         fd_trans_register(ret, &target_signalfd_trans);
7706     }
7707 
7708     unlock_user_struct(target_mask, mask, 0);
7709 
7710     return ret;
7711 }
7712 #endif
7713 
7714 /* Map host to target signal numbers for the wait family of syscalls.
7715    Assume all other status bits are the same.  */
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Replace the low 7 bits (terminating signal); keep the rest,
           including the core-dump flag. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return tsig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* The stop signal lives in bits 8-15. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
7727 
7728 static int open_self_cmdline(void *cpu_env, int fd)
7729 {
7730     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7731     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7732     int i;
7733 
7734     for (i = 0; i < bprm->argc; i++) {
7735         size_t len = strlen(bprm->argv[i]) + 1;
7736 
7737         if (write(fd, bprm->argv[i], len) != len) {
7738             return -1;
7739         }
7740     }
7741 
7742     return 0;
7743 }
7744 
/*
 * Emulate /proc/self/maps: walk QEMU's own host mappings (via
 * read_self_maps()) and emit only the ranges that correspond to valid
 * guest addresses, rewritten into guest addresses and guest page
 * protections.
 */
static int open_self_maps(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the range end to the last valid guest address. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip ranges whose guest pages do not all share these flags. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

            /* Label the guest stack mapping the way the kernel would. */
            if (h2g(min) == ts->info->stack_limit) {
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : '-',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Left-pad so the path column starts at a fixed offset. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
#endif

    return 0;
}
7806 
7807 static int open_self_stat(void *cpu_env, int fd)
7808 {
7809     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7810     TaskState *ts = cpu->opaque;
7811     g_autoptr(GString) buf = g_string_new(NULL);
7812     int i;
7813 
7814     for (i = 0; i < 44; i++) {
7815         if (i == 0) {
7816             /* pid */
7817             g_string_printf(buf, FMT_pid " ", getpid());
7818         } else if (i == 1) {
7819             /* app name */
7820             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
7821             bin = bin ? bin + 1 : ts->bprm->argv[0];
7822             g_string_printf(buf, "(%.15s) ", bin);
7823         } else if (i == 3) {
7824             /* ppid */
7825             g_string_printf(buf, FMT_pid " ", getppid());
7826         } else if (i == 27) {
7827             /* stack bottom */
7828             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
7829         } else {
7830             /* for the rest, there is MasterCard */
7831             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
7832         }
7833 
7834         if (write(fd, buf->str, buf->len) != buf->len) {
7835             return -1;
7836         }
7837     }
7838 
7839     return 0;
7840 }
7841 
/*
 * Emulate /proc/self/auxv: copy the auxiliary vector that was placed on
 * the guest stack at exec time out to fd, then rewind the file.
 * Always reports success; a failure to lock the guest memory yields an
 * empty file rather than an error.
 */
static int open_self_auxv(void *cpu_env, int fd)
{
    CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced by the loop, so after a
         * partial write unlock_user() receives a shifted pointer and a
         * residual length.  Harmless for a VERIFY_READ lock (nothing is
         * copied back), but worth confirming against lock_user's contract.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
7871 
/*
 * Return 1 if filename names /proc/<self>/<entry> for the current
 * process -- either through the "self" symlink or through our own
 * numeric pid -- and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric pid is accepted only when it is our own. */
        char myself[80];

        snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, strlen(myself)) != 0) {
            return 0;
        }
        rest += strlen(myself);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
7895 
7896 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
7897     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
/* Return nonzero when filename is exactly the given /proc path. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
7902 #endif
7903 
7904 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
7905 static int open_net_route(void *cpu_env, int fd)
7906 {
7907     FILE *fp;
7908     char *line = NULL;
7909     size_t len = 0;
7910     ssize_t read;
7911 
7912     fp = fopen("/proc/net/route", "r");
7913     if (fp == NULL) {
7914         return -1;
7915     }
7916 
7917     /* read header */
7918 
7919     read = getline(&line, &len, fp);
7920     dprintf(fd, "%s", line);
7921 
7922     /* read routes */
7923 
7924     while ((read = getline(&line, &len, fp)) != -1) {
7925         char iface[16];
7926         uint32_t dest, gw, mask;
7927         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
7928         int fields;
7929 
7930         fields = sscanf(line,
7931                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7932                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
7933                         &mask, &mtu, &window, &irtt);
7934         if (fields != 11) {
7935             continue;
7936         }
7937         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
7938                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
7939                 metric, tswap32(mask), mtu, window, irtt);
7940     }
7941 
7942     free(line);
7943     fclose(fp);
7944 
7945     return 0;
7946 }
7947 #endif
7948 
7949 #if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for sparc: report a fixed sun4u machine type. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    static const char contents[] = "type\t\t: sun4u\n";

    dprintf(fd, "%s", contents);
    return 0;
}
7955 #endif
7956 
7957 #if defined(TARGET_HPPA)
/* Emulate /proc/cpuinfo for hppa: describe a fixed PA7300LC machine. */
static int open_cpuinfo(void *cpu_env, int fd)
{
    dprintf(fd,
            "cpu family\t: PA-RISC 1.1e\n"
            "cpu\t\t: PA7300LC (PCX-L2)\n"
            "capabilities\t: os32\n"
            "model\t\t: 9000/778/B160L\n"
            "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
    return 0;
}
7967 #endif
7968 
7969 #if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k: report the QEMU machine model. */
static int open_hardware(void *cpu_env, int fd)
{
    static const char model[] = "Model:\t\tqemu-m68k\n";

    dprintf(fd, "%s", model);
    return 0;
}
7975 #endif
7976 
/*
 * openat() for the guest, intercepting a handful of /proc paths whose
 * contents must describe the emulated process rather than QEMU itself.
 * Matched entries are synthesized into an unlinked temporary file;
 * everything else falls through to the host via safe_openat().
 */
static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
{
    struct fake_open {
        const char *filename;   /* entry name or absolute path to match */
        int (*fill)(void *cpu_env, int fd);   /* writes fake contents to fd */
        int (*cmp)(const char *s1, const char *s2);   /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe must reopen the guest binary, not QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        int execfd = qemu_getauxval(AT_EXECFD);
        return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* create temporary file to map stat to */
        tmpdir = getenv("TMPDIR");
        if (!tmpdir)
            tmpdir = "/tmp";
        snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
        fd = mkstemp(filename);
        if (fd < 0) {
            return fd;
        }
        /* Unlink immediately: the open fd keeps the file alive, unseen. */
        unlink(filename);

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the synthesized contents from 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return safe_openat(dirfd, path(pathname), flags, mode);
}
8042 
8043 #define TIMER_MAGIC 0x0caf0000
8044 #define TIMER_MAGIC_MASK 0xffff0000
8045 
8046 /* Convert QEMU provided timer ID back to internal 16bit index format */
8047 static target_timer_t get_timer_id(abi_long arg)
8048 {
8049     target_timer_t timerid = arg;
8050 
8051     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8052         return -TARGET_EINVAL;
8053     }
8054 
8055     timerid &= 0xffff;
8056 
8057     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8058         return -TARGET_EINVAL;
8059     }
8060 
8061     return timerid;
8062 }
8063 
8064 static int target_to_host_cpu_mask(unsigned long *host_mask,
8065                                    size_t host_size,
8066                                    abi_ulong target_addr,
8067                                    size_t target_size)
8068 {
8069     unsigned target_bits = sizeof(abi_ulong) * 8;
8070     unsigned host_bits = sizeof(*host_mask) * 8;
8071     abi_ulong *target_mask;
8072     unsigned i, j;
8073 
8074     assert(host_size >= target_size);
8075 
8076     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8077     if (!target_mask) {
8078         return -TARGET_EFAULT;
8079     }
8080     memset(host_mask, 0, host_size);
8081 
8082     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8083         unsigned bit = i * target_bits;
8084         abi_ulong val;
8085 
8086         __get_user(val, &target_mask[i]);
8087         for (j = 0; j < target_bits; j++, bit++) {
8088             if (val & (1UL << j)) {
8089                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8090             }
8091         }
8092     }
8093 
8094     unlock_user(target_mask, target_addr, 0);
8095     return 0;
8096 }
8097 
8098 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8099                                    size_t host_size,
8100                                    abi_ulong target_addr,
8101                                    size_t target_size)
8102 {
8103     unsigned target_bits = sizeof(abi_ulong) * 8;
8104     unsigned host_bits = sizeof(*host_mask) * 8;
8105     abi_ulong *target_mask;
8106     unsigned i, j;
8107 
8108     assert(host_size >= target_size);
8109 
8110     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8111     if (!target_mask) {
8112         return -TARGET_EFAULT;
8113     }
8114 
8115     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8116         unsigned bit = i * target_bits;
8117         abi_ulong val = 0;
8118 
8119         for (j = 0; j < target_bits; j++, bit++) {
8120             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8121                 val |= 1UL << j;
8122             }
8123         }
8124         __put_user(val, &target_mask[i]);
8125     }
8126 
8127     unlock_user(target_mask, target_addr, target_size);
8128     return 0;
8129 }
8130 
8131 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8132 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8133 #endif
8134 
8135 /* This is an internal helper for do_syscall so that it is easier
8136  * to have a single return point, so that actions, such as logging
8137  * of syscall results, can be performed.
8138  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8139  */
8140 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8141                             abi_long arg2, abi_long arg3, abi_long arg4,
8142                             abi_long arg5, abi_long arg6, abi_long arg7,
8143                             abi_long arg8)
8144 {
8145     CPUState *cpu = env_cpu(cpu_env);
8146     abi_long ret;
8147 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8148     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8149     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8150     || defined(TARGET_NR_statx)
8151     struct stat st;
8152 #endif
8153 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8154     || defined(TARGET_NR_fstatfs)
8155     struct statfs stfs;
8156 #endif
8157     void *p;
8158 
8159     switch(num) {
8160     case TARGET_NR_exit:
8161         /* In old applications this may be used to implement _exit(2).
8162            However in threaded applications it is used for thread termination,
8163            and _exit_group is used for application termination.
8164            Do thread termination if we have more then one thread.  */
8165 
8166         if (block_signals()) {
8167             return -TARGET_ERESTARTSYS;
8168         }
8169 
8170         pthread_mutex_lock(&clone_lock);
8171 
8172         if (CPU_NEXT(first_cpu)) {
8173             TaskState *ts = cpu->opaque;
8174 
8175             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8176             object_unref(OBJECT(cpu));
8177             /*
8178              * At this point the CPU should be unrealized and removed
8179              * from cpu lists. We can clean-up the rest of the thread
8180              * data without the lock held.
8181              */
8182 
8183             pthread_mutex_unlock(&clone_lock);
8184 
8185             if (ts->child_tidptr) {
8186                 put_user_u32(0, ts->child_tidptr);
8187                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8188                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8189             }
8190             thread_cpu = NULL;
8191             g_free(ts);
8192             rcu_unregister_thread();
8193             pthread_exit(NULL);
8194         }
8195 
8196         pthread_mutex_unlock(&clone_lock);
8197         preexit_cleanup(cpu_env, arg1);
8198         _exit(arg1);
8199         return 0; /* avoid warning */
8200     case TARGET_NR_read:
8201         if (arg2 == 0 && arg3 == 0) {
8202             return get_errno(safe_read(arg1, 0, 0));
8203         } else {
8204             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8205                 return -TARGET_EFAULT;
8206             ret = get_errno(safe_read(arg1, p, arg3));
8207             if (ret >= 0 &&
8208                 fd_trans_host_to_target_data(arg1)) {
8209                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8210             }
8211             unlock_user(p, arg2, ret);
8212         }
8213         return ret;
8214     case TARGET_NR_write:
8215         if (arg2 == 0 && arg3 == 0) {
8216             return get_errno(safe_write(arg1, 0, 0));
8217         }
8218         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8219             return -TARGET_EFAULT;
8220         if (fd_trans_target_to_host_data(arg1)) {
8221             void *copy = g_malloc(arg3);
8222             memcpy(copy, p, arg3);
8223             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8224             if (ret >= 0) {
8225                 ret = get_errno(safe_write(arg1, copy, ret));
8226             }
8227             g_free(copy);
8228         } else {
8229             ret = get_errno(safe_write(arg1, p, arg3));
8230         }
8231         unlock_user(p, arg2, 0);
8232         return ret;
8233 
8234 #ifdef TARGET_NR_open
8235     case TARGET_NR_open:
8236         if (!(p = lock_user_string(arg1)))
8237             return -TARGET_EFAULT;
8238         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8239                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8240                                   arg3));
8241         fd_trans_unregister(ret);
8242         unlock_user(p, arg1, 0);
8243         return ret;
8244 #endif
8245     case TARGET_NR_openat:
8246         if (!(p = lock_user_string(arg2)))
8247             return -TARGET_EFAULT;
8248         ret = get_errno(do_openat(cpu_env, arg1, p,
8249                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8250                                   arg4));
8251         fd_trans_unregister(ret);
8252         unlock_user(p, arg2, 0);
8253         return ret;
8254 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8255     case TARGET_NR_name_to_handle_at:
8256         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8257         return ret;
8258 #endif
8259 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8260     case TARGET_NR_open_by_handle_at:
8261         ret = do_open_by_handle_at(arg1, arg2, arg3);
8262         fd_trans_unregister(ret);
8263         return ret;
8264 #endif
8265     case TARGET_NR_close:
8266         fd_trans_unregister(arg1);
8267         return get_errno(close(arg1));
8268 
8269     case TARGET_NR_brk:
8270         return do_brk(arg1);
8271 #ifdef TARGET_NR_fork
8272     case TARGET_NR_fork:
8273         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8274 #endif
8275 #ifdef TARGET_NR_waitpid
8276     case TARGET_NR_waitpid:
8277         {
8278             int status;
8279             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8280             if (!is_error(ret) && arg2 && ret
8281                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8282                 return -TARGET_EFAULT;
8283         }
8284         return ret;
8285 #endif
8286 #ifdef TARGET_NR_waitid
8287     case TARGET_NR_waitid:
8288         {
8289             siginfo_t info;
8290             info.si_pid = 0;
8291             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8292             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8293                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8294                     return -TARGET_EFAULT;
8295                 host_to_target_siginfo(p, &info);
8296                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8297             }
8298         }
8299         return ret;
8300 #endif
8301 #ifdef TARGET_NR_creat /* not on alpha */
8302     case TARGET_NR_creat:
8303         if (!(p = lock_user_string(arg1)))
8304             return -TARGET_EFAULT;
8305         ret = get_errno(creat(p, arg2));
8306         fd_trans_unregister(ret);
8307         unlock_user(p, arg1, 0);
8308         return ret;
8309 #endif
8310 #ifdef TARGET_NR_link
8311     case TARGET_NR_link:
8312         {
8313             void * p2;
8314             p = lock_user_string(arg1);
8315             p2 = lock_user_string(arg2);
8316             if (!p || !p2)
8317                 ret = -TARGET_EFAULT;
8318             else
8319                 ret = get_errno(link(p, p2));
8320             unlock_user(p2, arg2, 0);
8321             unlock_user(p, arg1, 0);
8322         }
8323         return ret;
8324 #endif
8325 #if defined(TARGET_NR_linkat)
8326     case TARGET_NR_linkat:
8327         {
8328             void * p2 = NULL;
8329             if (!arg2 || !arg4)
8330                 return -TARGET_EFAULT;
8331             p  = lock_user_string(arg2);
8332             p2 = lock_user_string(arg4);
8333             if (!p || !p2)
8334                 ret = -TARGET_EFAULT;
8335             else
8336                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8337             unlock_user(p, arg2, 0);
8338             unlock_user(p2, arg4, 0);
8339         }
8340         return ret;
8341 #endif
8342 #ifdef TARGET_NR_unlink
8343     case TARGET_NR_unlink:
8344         if (!(p = lock_user_string(arg1)))
8345             return -TARGET_EFAULT;
8346         ret = get_errno(unlink(p));
8347         unlock_user(p, arg1, 0);
8348         return ret;
8349 #endif
8350 #if defined(TARGET_NR_unlinkat)
8351     case TARGET_NR_unlinkat:
8352         if (!(p = lock_user_string(arg2)))
8353             return -TARGET_EFAULT;
8354         ret = get_errno(unlinkat(arg1, p, arg3));
8355         unlock_user(p, arg2, 0);
8356         return ret;
8357 #endif
8358     case TARGET_NR_execve:
8359         {
8360             char **argp, **envp;
8361             int argc, envc;
8362             abi_ulong gp;
8363             abi_ulong guest_argp;
8364             abi_ulong guest_envp;
8365             abi_ulong addr;
8366             char **q;
8367 
8368             argc = 0;
8369             guest_argp = arg2;
8370             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8371                 if (get_user_ual(addr, gp))
8372                     return -TARGET_EFAULT;
8373                 if (!addr)
8374                     break;
8375                 argc++;
8376             }
8377             envc = 0;
8378             guest_envp = arg3;
8379             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8380                 if (get_user_ual(addr, gp))
8381                     return -TARGET_EFAULT;
8382                 if (!addr)
8383                     break;
8384                 envc++;
8385             }
8386 
8387             argp = g_new0(char *, argc + 1);
8388             envp = g_new0(char *, envc + 1);
8389 
8390             for (gp = guest_argp, q = argp; gp;
8391                   gp += sizeof(abi_ulong), q++) {
8392                 if (get_user_ual(addr, gp))
8393                     goto execve_efault;
8394                 if (!addr)
8395                     break;
8396                 if (!(*q = lock_user_string(addr)))
8397                     goto execve_efault;
8398             }
8399             *q = NULL;
8400 
8401             for (gp = guest_envp, q = envp; gp;
8402                   gp += sizeof(abi_ulong), q++) {
8403                 if (get_user_ual(addr, gp))
8404                     goto execve_efault;
8405                 if (!addr)
8406                     break;
8407                 if (!(*q = lock_user_string(addr)))
8408                     goto execve_efault;
8409             }
8410             *q = NULL;
8411 
8412             if (!(p = lock_user_string(arg1)))
8413                 goto execve_efault;
8414             /* Although execve() is not an interruptible syscall it is
8415              * a special case where we must use the safe_syscall wrapper:
8416              * if we allow a signal to happen before we make the host
8417              * syscall then we will 'lose' it, because at the point of
8418              * execve the process leaves QEMU's control. So we use the
8419              * safe syscall wrapper to ensure that we either take the
8420              * signal as a guest signal, or else it does not happen
8421              * before the execve completes and makes it the other
8422              * program's problem.
8423              */
8424             ret = get_errno(safe_execve(p, argp, envp));
8425             unlock_user(p, arg1, 0);
8426 
8427             goto execve_end;
8428 
8429         execve_efault:
8430             ret = -TARGET_EFAULT;
8431 
8432         execve_end:
8433             for (gp = guest_argp, q = argp; *q;
8434                   gp += sizeof(abi_ulong), q++) {
8435                 if (get_user_ual(addr, gp)
8436                     || !addr)
8437                     break;
8438                 unlock_user(*q, addr, 0);
8439             }
8440             for (gp = guest_envp, q = envp; *q;
8441                   gp += sizeof(abi_ulong), q++) {
8442                 if (get_user_ual(addr, gp)
8443                     || !addr)
8444                     break;
8445                 unlock_user(*q, addr, 0);
8446             }
8447 
8448             g_free(argp);
8449             g_free(envp);
8450         }
8451         return ret;
8452     case TARGET_NR_chdir:
8453         if (!(p = lock_user_string(arg1)))
8454             return -TARGET_EFAULT;
8455         ret = get_errno(chdir(p));
8456         unlock_user(p, arg1, 0);
8457         return ret;
8458 #ifdef TARGET_NR_time
8459     case TARGET_NR_time:
8460         {
8461             time_t host_time;
8462             ret = get_errno(time(&host_time));
8463             if (!is_error(ret)
8464                 && arg1
8465                 && put_user_sal(host_time, arg1))
8466                 return -TARGET_EFAULT;
8467         }
8468         return ret;
8469 #endif
8470 #ifdef TARGET_NR_mknod
8471     case TARGET_NR_mknod:
8472         if (!(p = lock_user_string(arg1)))
8473             return -TARGET_EFAULT;
8474         ret = get_errno(mknod(p, arg2, arg3));
8475         unlock_user(p, arg1, 0);
8476         return ret;
8477 #endif
8478 #if defined(TARGET_NR_mknodat)
8479     case TARGET_NR_mknodat:
8480         if (!(p = lock_user_string(arg2)))
8481             return -TARGET_EFAULT;
8482         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8483         unlock_user(p, arg2, 0);
8484         return ret;
8485 #endif
8486 #ifdef TARGET_NR_chmod
8487     case TARGET_NR_chmod:
8488         if (!(p = lock_user_string(arg1)))
8489             return -TARGET_EFAULT;
8490         ret = get_errno(chmod(p, arg2));
8491         unlock_user(p, arg1, 0);
8492         return ret;
8493 #endif
8494 #ifdef TARGET_NR_lseek
8495     case TARGET_NR_lseek:
8496         return get_errno(lseek(arg1, arg2, arg3));
8497 #endif
8498 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8499     /* Alpha specific */
8500     case TARGET_NR_getxpid:
8501         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8502         return get_errno(getpid());
8503 #endif
8504 #ifdef TARGET_NR_getpid
8505     case TARGET_NR_getpid:
8506         return get_errno(getpid());
8507 #endif
8508     case TARGET_NR_mount:
8509         {
8510             /* need to look at the data field */
8511             void *p2, *p3;
8512 
8513             if (arg1) {
8514                 p = lock_user_string(arg1);
8515                 if (!p) {
8516                     return -TARGET_EFAULT;
8517                 }
8518             } else {
8519                 p = NULL;
8520             }
8521 
8522             p2 = lock_user_string(arg2);
8523             if (!p2) {
8524                 if (arg1) {
8525                     unlock_user(p, arg1, 0);
8526                 }
8527                 return -TARGET_EFAULT;
8528             }
8529 
8530             if (arg3) {
8531                 p3 = lock_user_string(arg3);
8532                 if (!p3) {
8533                     if (arg1) {
8534                         unlock_user(p, arg1, 0);
8535                     }
8536                     unlock_user(p2, arg2, 0);
8537                     return -TARGET_EFAULT;
8538                 }
8539             } else {
8540                 p3 = NULL;
8541             }
8542 
8543             /* FIXME - arg5 should be locked, but it isn't clear how to
8544              * do that since it's not guaranteed to be a NULL-terminated
8545              * string.
8546              */
8547             if (!arg5) {
8548                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8549             } else {
8550                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8551             }
8552             ret = get_errno(ret);
8553 
8554             if (arg1) {
8555                 unlock_user(p, arg1, 0);
8556             }
8557             unlock_user(p2, arg2, 0);
8558             if (arg3) {
8559                 unlock_user(p3, arg3, 0);
8560             }
8561         }
8562         return ret;
8563 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8564 #if defined(TARGET_NR_umount)
8565     case TARGET_NR_umount:
8566 #endif
8567 #if defined(TARGET_NR_oldumount)
8568     case TARGET_NR_oldumount:
8569 #endif
8570         if (!(p = lock_user_string(arg1)))
8571             return -TARGET_EFAULT;
8572         ret = get_errno(umount(p));
8573         unlock_user(p, arg1, 0);
8574         return ret;
8575 #endif
8576 #ifdef TARGET_NR_stime /* not on alpha */
8577     case TARGET_NR_stime:
8578         {
8579             struct timespec ts;
8580             ts.tv_nsec = 0;
8581             if (get_user_sal(ts.tv_sec, arg1)) {
8582                 return -TARGET_EFAULT;
8583             }
8584             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8585         }
8586 #endif
8587 #ifdef TARGET_NR_alarm /* not on alpha */
8588     case TARGET_NR_alarm:
8589         return alarm(arg1);
8590 #endif
8591 #ifdef TARGET_NR_pause /* not on alpha */
8592     case TARGET_NR_pause:
8593         if (!block_signals()) {
8594             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8595         }
8596         return -TARGET_EINTR;
8597 #endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            /* Convert the guest utimbuf (access/modification times) to the
             * host layout; a NULL buffer means "set both to current time". */
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            /* Copy in the optional pair of guest timevals (atime, mtime);
             * NULL means "use the current time". */
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            /* As utimes, but relative to dirfd arg1; the pathname is
             * remapped through path() for QEMU's /proc substitutions. */
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        /* Check accessibility of a guest path; path() applies QEMU's
         * special-path remapping (e.g. for /proc). */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        /* dirfd-relative access check; the flags argument is always 0
         * here (faccessat(2) itself takes no flags at the kernel ABI). */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        /* Adjust process priority by the guest-supplied increment. */
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync() has no return value and cannot fail. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Translate the guest signal number before delivering; safe_kill
         * is the signal-safe syscall wrapper. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            /* Lock both guest path strings; either failing yields EFAULT.
             * unlock_user() on a possibly-NULL pointer is harmless. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* dirfd-relative rename: arg1/arg3 are the old/new dirfds. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /* renameat2 adds a flags argument (arg5, e.g. RENAME_NOREPLACE);
             * routed through sys_renameat2 since libc may lack a wrapper. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* Create a directory with mode arg2. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* dirfd-relative mkdir: arg1 is the dirfd, arg3 the mode. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        /* Remove an (empty) directory. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Carry any fd translator (e.g. for netlink sockets) over to
             * the duplicated descriptor. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        /* Classic pipe(): no flags, old-style result convention. */
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2(): translate O_CLOEXEC/O_NONBLOCK-style flags to host. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            /* Fill the guest's tms structure with converted clock_t values;
             * the syscall's own return value is also a clock_t and must be
             * converted too. */
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* A NULL pathname disables process accounting; otherwise enable
         * it on the (remapped) guest path. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        /* umount with flags (e.g. MNT_FORCE) in arg2. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* All ioctl marshalling lives in do_ioctl(). */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        /* fcntl command/flag translation lives in do_fcntl(). */
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        /* Note: the path is passed through unremapped here. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* On success the new fd is arg2; copy any fd translator from
             * the source descriptor to it. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* dup3() accepts only O_CLOEXEC in its flags argument; reject
         * anything else before translating the bitmask for the host.
         * Bug fix: return the *target* EINVAL, not the host value, for
         * consistency with every other error path (they can differ on
         * some target/host combinations).
         */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* On success the new fd is arg2; propagate any fd translator. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        /* Simple pass-through identity syscalls. */
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Old-style sigaction: convert the guest's (old) sigaction
             * layout to the internal target_sigaction, install via
             * do_sigaction(), and write back the previous action.
             * MIPS uses the full target_sigaction layout here (with a
             * 4-word sa_mask), other targets use target_old_sigaction. */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel rejects any sigsetsize other than the native one. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            /* 'act' must be unlocked even if locking 'oact' fails, hence
             * the error is recorded rather than returned immediately. */
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Return the current blocked-signal mask in the old
             * single-word format. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Replace the blocked-signal mask with arg1 (old single-word
             * format) and return the previous mask. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /* Old-style sigprocmask.  The Alpha variant takes the mask by
             * value in arg2 and returns the old mask in the result
             * register; other targets use pointers for both. */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL new-set pointer: only query the current mask. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /* rt_sigprocmask: like sigprocmask but with an explicit
             * sigsetsize (arg4) that must match the target sigset size. */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
                if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
                    return -TARGET_EFAULT;
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
            } else {
                /* NULL new-set pointer: only query the current mask. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: report pending signals in the old
             * (single-word) sigset format. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /* Old-style sigsuspend.  On Alpha the mask is passed by value
             * in arg1; elsewhere it is read from guest memory.  The mask
             * is stashed in the TaskState so signal delivery can restore
             * the original mask after the handler runs. */
            TaskState *ts = cpu->opaque;
#if defined(TARGET_ALPHA)
            abi_ulong mask = arg1;
            target_to_host_old_sigset(&ts->sigsuspend_mask, &mask);
#else
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_old_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
#endif
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            /* rt_sigsuspend: full sigset plus an explicit sigsetsize
             * (arg2) that must match the target sigset size. */
            TaskState *ts = cpu->opaque;

            if (arg2 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&ts->sigsuspend_mask, p);
            unlock_user(p, arg1, 0);
            ret = get_errno(safe_rt_sigsuspend(&ts->sigsuspend_mask,
                                               SIGSET_T_SIZE));
            if (ret != -TARGET_ERESTARTSYS) {
                ts->in_sigsuspend = 1;
            }
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /* Wait for a signal in 'set' with an optional timeout (arg3,
             * 32-bit timespec); on success, write the siginfo to arg2 and
             * return the target signal number. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout: block indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait, but the timeout (arg3) is a 64-bit
             * timespec (for 32-bit targets with y2038-safe time). */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout: block indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* Queue a signal (arg2) with caller-supplied siginfo (arg3)
             * to process arg1. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Thread-directed variant: tgid arg1, tid arg2, signal arg3,
             * siginfo at arg4. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Signals must be blocked while the signal frame is restored;
         * if that fails, ask the guest to restart the syscall. */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* arg2 is the length of the (not necessarily NUL-terminated)
         * hostname string. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits are silently ignored (see comment above). */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* Query a host resource limit and convert both bounds to the
             * guest's rlim representation. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            /* Fetch host rusage and marshal it into the guest's struct. */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /* Both output pointers are optional; copy out whichever the
             * guest supplied. */
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /* Both input pointers are optional; NULL means "leave that
             * component unchanged". */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* old_select takes a single pointer to an argument block. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* Final flag selects 32-bit (false) vs 64-bit (true) timespec. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            /* Lock both guest path strings; either failing yields EFAULT. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            /* dirfd-relative symlink: target arg1, dirfd arg2, linkpath arg3. */
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            /* readlink, with a special case for /proc/self/exe: report
             * the guest executable's path rather than the QEMU binary. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg3) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    ret = get_errno(-1);
                } else {
                    /* Don't worry about sign mismatch as earlier mapping
                     * logic would have thrown a bad address error. */
                    ret = MIN(strlen(real), arg3);
                    /* We cannot NUL terminate the string. */
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlink(path(p), p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /* readlinkat(dirfd, pathname, buf, bufsiz).  As with readlink
         * above, reads of the magic /proc/self/exe symlink are answered
         * with the resolved host path of the emulated executable.
         * Fixed to mirror the TARGET_NR_readlink path: previously a
         * failed realpath() still snprintf'd the *uninitialized* real[]
         * buffer into guest memory, and a successful lookup returned
         * strlen(real) even when it exceeded bufsiz while snprintf
         * truncated and NUL-terminated (readlink(2) does neither). */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                char real[PATH_MAX], *temp;
                temp = realpath(exec_path, real);
                /* Return value is # of bytes that we wrote to the buffer. */
                if (temp == NULL) {
                    /* Do not copy an uninitialized buffer to the guest. */
                    ret = get_errno(-1);
                } else {
                    /* readlink(2) truncates silently and does not NUL
                     * terminate the result. */
                    ret = MIN(strlen(real), arg4);
                    memcpy(p2, real, ret);
                }
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* swapon(path, swapflags): pass the locked guest path through. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* reboot(magic, magic2, cmd, arg): the fourth argument is a
         * command string only for RESTART2. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
        /* Old-style mmap.  On the listed 32-bit targets the six
         * arguments are passed in a guest-memory block pointed to by
         * arg1 (the historical i386 sys_old_mmap convention); all other
         * targets pass them in registers. */
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            /* Byte-swap each argument from guest order. */
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the file offset in units of 2^MMAP_SHIFT-byte
         * pages (4096 unless the target overrides it). */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* Memory-tagging targets: strip the tag bits before using the
         * value as a guest address. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Extend the range down to the stack limit ourselves,
                 * since the host kernel knows nothing about the guest
                 * stack mapping. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* These operate directly on the host mapping of the guest
         * address (g2h), so no target_* wrapper is needed. */
        return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* Flag bits differ between guest and host ABIs. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* The kernel fchmodat takes no flags argument; pass 0. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        /* statfs family: the host statfs/fstatfs result in 'stfs'
         * (declared in the enclosing function) is converted to the
         * guest layout at the shared convert_statfs label, which
         * TARGET_NR_fstatfs also jumps to. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            /* arg2 is the guest struct statfs pointer for both statfs
             * and fstatfs. */
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        /* 64-bit variant: arg2 is the (ignored) size argument, arg3 the
         * guest struct statfs64 pointer. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    /* NOTE(review): fstatfs64 is guarded by TARGET_NR_statfs64 rather
     * than its own define; presumably the two are always defined
     * together -- confirm against the syscall tables. */
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
    /*
     * Socket syscalls.  Each case forwards to a do_* helper that handles
     * guest/host sockaddr and msghdr conversion; older ABIs multiplex
     * all of them through socketcall().
     */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Last argument selects send (1) vs receive (0). */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* getrandom(buf, buflen, flags): fill a locked guest buffer. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        /*
         * syslog(type, bufp, len): arg1 is the action, arg2 the guest
         * buffer pointer and arg3 its length.
         * Fix: 'len' was previously initialized from arg2 (the buffer
         * pointer) instead of arg3, so the length sanity checks for the
         * READ* actions tested the wrong argument -- a negative length
         * was never rejected and a zero length reached the host call.
         */
        {
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions do not use the buffer. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    /* A NULL guest buffer fails the lock and yields
                     * EFAULT (the kernel itself returns EINVAL). */
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        /* setitimer(which, new_value, old_value): convert the guest
         * itimerval (two target_timevals back to back) in both
         * directions. */
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                /* NULL new_value is allowed and passed through. */
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Copy the previous timer value back to the guest. */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        /* stat/lstat/fstat share the struct-stat conversion code at the
         * do_stat label inside the fstat case below; the host result is
         * kept in 'st' (declared in the enclosing function). */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                /* arg2 is the guest struct stat pointer for all three
                 * syscalls. */
                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding and unset fields are clean. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when both host and target
                 * support them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall (e.g. MIPS): re-dispatch with shifted
         * arguments; the low 16 bits select the real syscall number. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        /* wait4(pid, status, options, rusage): both out-parameters are
         * optional guest pointers. */
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back when a child was reaped
                 * (ret != 0, e.g. not WNOHANG with nothing ready). */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        /* sysinfo(info): field-by-field conversion of the host struct
         * sysinfo to the guest layout.  A NULL guest pointer skips the
         * copy-out but still returns the host result. */
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
    /*
     * SysV IPC.  Some ABIs multiplex everything through ipc(); others
     * provide the individual syscalls.  Conversion of semun/msgbuf/
     * shmid_ds etc. lives in the do_* helpers.
     */
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() with no timeout. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* Final flag selects the 64-bit time_t timespec layout. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* Needs cpu_env to place the mapping in the guest address
         * space. */
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest cleanup (gprof, gdbstub, ...) before
         * taking the whole thread group down. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        /* x86-only: guest LDT manipulation is emulated in userspace. */
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* 32-bit x86 only: virtual-8086 mode emulation. */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        /* adjtimex(buf): struct timex is converted in both directions
         * since the kernel updates it in place. */
        {
            struct timex host_buf;

            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            /* NOTE(review): phtx always points at htx, so the
             * "&& phtx" below can never be false -- redundant check. */
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        /* time64 variant: same flow with the 64-bit timex layout. */
        {
            struct timex htx;

            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        /* _llseek(fd, offset_high, offset_low, result, whence): the
         * 64-bit result is written back through the guest pointer in
         * arg4. */
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* 64-bit host without _llseek: a plain lseek covers the
             * full range. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
#if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
        /* 32-bit guest on 64-bit host: host and target struct linux_dirent
         * differ in field width, so read into a host bounce buffer and
         * convert record by record into the guest buffer. */
        {
            struct target_dirent *target_dirp;
            struct linux_dirent *dirp;
            abi_long count = arg3;

            dirp = g_try_malloc(count);
            if (!dirp) {
                return -TARGET_ENOMEM;
            }

            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
		struct target_dirent *tde;
                int len = ret;
                int reclen, treclen;
		int count1, tnamelen;

		count1 = 0;
                de = dirp;
                if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                    return -TARGET_EFAULT;
		tde = target_dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Keep the host record's name-area size; since the
                     * target header is no larger than the host's, the
                     * converted records always fit in the guest buffer. */
                    tnamelen = reclen - offsetof(struct linux_dirent, d_name);
                    assert(tnamelen >= 0);
                    treclen = tnamelen + offsetof(struct target_dirent, d_name);
                    assert(count1 + treclen <= count);
                    tde->d_reclen = tswap16(treclen);
                    tde->d_ino = tswapal(de->d_ino);
                    tde->d_off = tswapal(de->d_off);
                    memcpy(tde->d_name, de->d_name, tnamelen);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                    tde = (struct target_dirent *)((char *)tde + treclen);
		    count1 += treclen;
                }
		ret = count1;
                unlock_user(target_dirp, arg2, ret);
            }
            g_free(dirp);
        }
#else
        /* Same layout on host and target: only byte-swap the fields
         * in place inside the guest buffer. */
        {
            struct linux_dirent *dirp;
            abi_long count = arg3;

            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswapls(&de->d_ino);
                    tswapls(&de->d_off);
                    de = (struct linux_dirent *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
#else
        /* Implement getdents in terms of getdents64 */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;

            dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
            if (!dirp) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                /* Convert the dirent64 structs to target dirent.  We do this
                 * in-place, since we can guarantee that a target_dirent is no
                 * larger than a dirent64; however this means we have to be
                 * careful to read everything before writing in the new format.
                 */
                struct linux_dirent64 *de;
                struct target_dirent *tde;
                int len = ret;
                int tlen = 0;

                de = dirp;
                tde = (struct target_dirent *)dirp;
                while (len > 0) {
                    int namelen, treclen;
                    int reclen = de->d_reclen;
                    uint64_t ino = de->d_ino;
                    int64_t off = de->d_off;
                    uint8_t type = de->d_type;

                    namelen = strlen(de->d_name);
                    /* +2 = trailing NUL plus the d_type byte stored after
                     * the name (see comment below). */
                    treclen = offsetof(struct target_dirent, d_name)
                        + namelen + 2;
                    treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));

                    memmove(tde->d_name, de->d_name, namelen + 1);
                    tde->d_ino = tswapal(ino);
                    tde->d_off = tswapal(off);
                    tde->d_reclen = tswap16(treclen);
                    /* The target_dirent type is in what was formerly a padding
                     * byte at the end of the structure:
                     */
                    *(((char *)tde) + treclen - 1) = type;

                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    tde = (struct target_dirent *)((char *)tde + treclen);
                    len -= reclen;
                    tlen += treclen;
                }
                ret = tlen;
            }
            unlock_user(dirp, arg2, ret);
        }
#endif
        return ret;
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        /* linux_dirent64 has the same layout on all ABIs, so only the
         * endianness of each record's fields needs fixing up in place. */
        {
            struct linux_dirent64 *dirp;
            abi_long count = arg3;
            if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
                return -TARGET_EFAULT;
            ret = get_errno(sys_getdents64(arg1, dirp, count));
            if (!is_error(ret)) {
                struct linux_dirent64 *de;
                int len = ret;
                int reclen;
                de = dirp;
                while (len > 0) {
                    reclen = de->d_reclen;
                    /* Stop on a truncated trailing record. */
                    if (reclen > len)
                        break;
                    de->d_reclen = tswap16(reclen);
                    tswap64s((uint64_t *)&de->d_ino);
                    tswap64s((uint64_t *)&de->d_off);
                    de = (struct linux_dirent64 *)((char *)de + reclen);
                    len -= reclen;
                }
            }
            unlock_user(dirp, arg2, ret);
        }
        return ret;
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        /* do_ppoll flags: ppoll=false, time64=false */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        /* ppoll semantics with the target's native timespec layout */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        /* ppoll semantics with a 64-bit (time64 ABI) timespec */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* lock_iovec(..., copy=0): writable buffers, nothing copied in;
             * unlock_iovec(..., copy=1) copies the read data back out. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed and left the reason in errno */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            /* Mirror of readv: copy guest data in, nothing copied back. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Split the guest's 64-bit offset into the low/high
                 * words the host syscall expects. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        /* Direct pass-through: pid argument needs no translation. */
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                /* Guest buffer size (arg2, in bytes) must be a multiple
                 * of the target's ulong size, as the kernel requires. */
                return -TARGET_EINVAL;
            }
            /* Round up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the byte count up to whole host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Convert the guest CPU mask (arg3) into host format;
             * returns a target errno on failure. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Only query the values the guest actually asked for
             * (NULL guest pointers map to NULL host pointers). */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            /* Copy each result back to the guest where requested. */
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            /* The kernel rejects a NULL param pointer with EINVAL. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                /* Copy the priority back out in target byte order. */
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
                    return -TARGET_EFAULT;
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
                return -TARGET_EFAULT;
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            /* NOTE(review): policy value in arg2 is passed through
             * untranslated -- assumes host and target agree. */
            return get_errno(sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sched_getscheduler(arg1));
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Convert to the target's native timespec layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same call, but the guest expects a 64-bit timespec. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* Fail with EFAULT if the guest request timespec is
             * unreadable, rather than sleeping on an uninitialized
             * value (previously the conversion result was ignored). */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            /* On error (typically EINTR) the remaining time is written
             * back through arg2 when the guest supplied it; report
             * EFAULT if that buffer is unwritable. */
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
10623     case TARGET_NR_prctl:
10624         switch (arg1) {
10625         case PR_GET_PDEATHSIG:
10626         {
10627             int deathsig;
10628             ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
10629             if (!is_error(ret) && arg2
10630                 && put_user_s32(deathsig, arg2)) {
10631                 return -TARGET_EFAULT;
10632             }
10633             return ret;
10634         }
10635 #ifdef PR_GET_NAME
10636         case PR_GET_NAME:
10637         {
10638             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
10639             if (!name) {
10640                 return -TARGET_EFAULT;
10641             }
10642             ret = get_errno(prctl(arg1, (unsigned long)name,
10643                                   arg3, arg4, arg5));
10644             unlock_user(name, arg2, 16);
10645             return ret;
10646         }
10647         case PR_SET_NAME:
10648         {
10649             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
10650             if (!name) {
10651                 return -TARGET_EFAULT;
10652             }
10653             ret = get_errno(prctl(arg1, (unsigned long)name,
10654                                   arg3, arg4, arg5));
10655             unlock_user(name, arg2, 0);
10656             return ret;
10657         }
10658 #endif
10659 #ifdef TARGET_MIPS
10660         case TARGET_PR_GET_FP_MODE:
10661         {
10662             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10663             ret = 0;
10664             if (env->CP0_Status & (1 << CP0St_FR)) {
10665                 ret |= TARGET_PR_FP_MODE_FR;
10666             }
10667             if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
10668                 ret |= TARGET_PR_FP_MODE_FRE;
10669             }
10670             return ret;
10671         }
10672         case TARGET_PR_SET_FP_MODE:
10673         {
10674             CPUMIPSState *env = ((CPUMIPSState *)cpu_env);
10675             bool old_fr = env->CP0_Status & (1 << CP0St_FR);
10676             bool old_fre = env->CP0_Config5 & (1 << CP0C5_FRE);
10677             bool new_fr = arg2 & TARGET_PR_FP_MODE_FR;
10678             bool new_fre = arg2 & TARGET_PR_FP_MODE_FRE;
10679 
10680             const unsigned int known_bits = TARGET_PR_FP_MODE_FR |
10681                                             TARGET_PR_FP_MODE_FRE;
10682 
10683             /* If nothing to change, return right away, successfully.  */
10684             if (old_fr == new_fr && old_fre == new_fre) {
10685                 return 0;
10686             }
10687             /* Check the value is valid */
10688             if (arg2 & ~known_bits) {
10689                 return -TARGET_EOPNOTSUPP;
10690             }
10691             /* Setting FRE without FR is not supported.  */
10692             if (new_fre && !new_fr) {
10693                 return -TARGET_EOPNOTSUPP;
10694             }
10695             if (new_fr && !(env->active_fpu.fcr0 & (1 << FCR0_F64))) {
10696                 /* FR1 is not supported */
10697                 return -TARGET_EOPNOTSUPP;
10698             }
10699             if (!new_fr && (env->active_fpu.fcr0 & (1 << FCR0_F64))
10700                 && !(env->CP0_Status_rw_bitmask & (1 << CP0St_FR))) {
10701                 /* cannot set FR=0 */
10702                 return -TARGET_EOPNOTSUPP;
10703             }
10704             if (new_fre && !(env->active_fpu.fcr0 & (1 << FCR0_FREP))) {
10705                 /* Cannot set FRE=1 */
10706                 return -TARGET_EOPNOTSUPP;
10707             }
10708 
10709             int i;
10710             fpr_t *fpr = env->active_fpu.fpr;
10711             for (i = 0; i < 32 ; i += 2) {
10712                 if (!old_fr && new_fr) {
10713                     fpr[i].w[!FP_ENDIAN_IDX] = fpr[i + 1].w[FP_ENDIAN_IDX];
10714                 } else if (old_fr && !new_fr) {
10715                     fpr[i + 1].w[FP_ENDIAN_IDX] = fpr[i].w[!FP_ENDIAN_IDX];
10716                 }
10717             }
10718 
10719             if (new_fr) {
10720                 env->CP0_Status |= (1 << CP0St_FR);
10721                 env->hflags |= MIPS_HFLAG_F64;
10722             } else {
10723                 env->CP0_Status &= ~(1 << CP0St_FR);
10724                 env->hflags &= ~MIPS_HFLAG_F64;
10725             }
10726             if (new_fre) {
10727                 env->CP0_Config5 |= (1 << CP0C5_FRE);
10728                 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
10729                     env->hflags |= MIPS_HFLAG_FRE;
10730                 }
10731             } else {
10732                 env->CP0_Config5 &= ~(1 << CP0C5_FRE);
10733                 env->hflags &= ~MIPS_HFLAG_FRE;
10734             }
10735 
10736             return 0;
10737         }
10738 #endif /* MIPS */
10739 #ifdef TARGET_AARCH64
10740         case TARGET_PR_SVE_SET_VL:
10741             /*
10742              * We cannot support either PR_SVE_SET_VL_ONEXEC or
10743              * PR_SVE_VL_INHERIT.  Note the kernel definition
10744              * of sve_vl_valid allows for VQ=512, i.e. VL=8192,
10745              * even though the current architectural maximum is VQ=16.
10746              */
10747             ret = -TARGET_EINVAL;
10748             if (cpu_isar_feature(aa64_sve, env_archcpu(cpu_env))
10749                 && arg2 >= 0 && arg2 <= 512 * 16 && !(arg2 & 15)) {
10750                 CPUARMState *env = cpu_env;
10751                 ARMCPU *cpu = env_archcpu(env);
10752                 uint32_t vq, old_vq;
10753 
10754                 old_vq = (env->vfp.zcr_el[1] & 0xf) + 1;
10755                 vq = MAX(arg2 / 16, 1);
10756                 vq = MIN(vq, cpu->sve_max_vq);
10757 
10758                 if (vq < old_vq) {
10759                     aarch64_sve_narrow_vq(env, vq);
10760                 }
10761                 env->vfp.zcr_el[1] = vq - 1;
10762                 arm_rebuild_hflags(env);
10763                 ret = vq * 16;
10764             }
10765             return ret;
10766         case TARGET_PR_SVE_GET_VL:
10767             ret = -TARGET_EINVAL;
10768             {
10769                 ARMCPU *cpu = env_archcpu(cpu_env);
10770                 if (cpu_isar_feature(aa64_sve, cpu)) {
10771                     ret = ((cpu->env.vfp.zcr_el[1] & 0xf) + 1) * 16;
10772                 }
10773             }
10774             return ret;
10775         case TARGET_PR_PAC_RESET_KEYS:
10776             {
10777                 CPUARMState *env = cpu_env;
10778                 ARMCPU *cpu = env_archcpu(env);
10779 
10780                 if (arg3 || arg4 || arg5) {
10781                     return -TARGET_EINVAL;
10782                 }
10783                 if (cpu_isar_feature(aa64_pauth, cpu)) {
10784                     int all = (TARGET_PR_PAC_APIAKEY | TARGET_PR_PAC_APIBKEY |
10785                                TARGET_PR_PAC_APDAKEY | TARGET_PR_PAC_APDBKEY |
10786                                TARGET_PR_PAC_APGAKEY);
10787                     int ret = 0;
10788                     Error *err = NULL;
10789 
10790                     if (arg2 == 0) {
10791                         arg2 = all;
10792                     } else if (arg2 & ~all) {
10793                         return -TARGET_EINVAL;
10794                     }
10795                     if (arg2 & TARGET_PR_PAC_APIAKEY) {
10796                         ret |= qemu_guest_getrandom(&env->keys.apia,
10797                                                     sizeof(ARMPACKey), &err);
10798                     }
10799                     if (arg2 & TARGET_PR_PAC_APIBKEY) {
10800                         ret |= qemu_guest_getrandom(&env->keys.apib,
10801                                                     sizeof(ARMPACKey), &err);
10802                     }
10803                     if (arg2 & TARGET_PR_PAC_APDAKEY) {
10804                         ret |= qemu_guest_getrandom(&env->keys.apda,
10805                                                     sizeof(ARMPACKey), &err);
10806                     }
10807                     if (arg2 & TARGET_PR_PAC_APDBKEY) {
10808                         ret |= qemu_guest_getrandom(&env->keys.apdb,
10809                                                     sizeof(ARMPACKey), &err);
10810                     }
10811                     if (arg2 & TARGET_PR_PAC_APGAKEY) {
10812                         ret |= qemu_guest_getrandom(&env->keys.apga,
10813                                                     sizeof(ARMPACKey), &err);
10814                     }
10815                     if (ret != 0) {
10816                         /*
10817                          * Some unknown failure in the crypto.  The best
10818                          * we can do is log it and fail the syscall.
10819                          * The real syscall cannot fail this way.
10820                          */
10821                         qemu_log_mask(LOG_UNIMP,
10822                                       "PR_PAC_RESET_KEYS: Crypto failure: %s",
10823                                       error_get_pretty(err));
10824                         error_free(err);
10825                         return -TARGET_EIO;
10826                     }
10827                     return 0;
10828                 }
10829             }
10830             return -TARGET_EINVAL;
10831         case TARGET_PR_SET_TAGGED_ADDR_CTRL:
10832             {
10833                 abi_ulong valid_mask = TARGET_PR_TAGGED_ADDR_ENABLE;
10834                 CPUARMState *env = cpu_env;
10835                 ARMCPU *cpu = env_archcpu(env);
10836 
10837                 if (cpu_isar_feature(aa64_mte, cpu)) {
10838                     valid_mask |= TARGET_PR_MTE_TCF_MASK;
10839                     valid_mask |= TARGET_PR_MTE_TAG_MASK;
10840                 }
10841 
10842                 if ((arg2 & ~valid_mask) || arg3 || arg4 || arg5) {
10843                     return -TARGET_EINVAL;
10844                 }
10845                 env->tagged_addr_enable = arg2 & TARGET_PR_TAGGED_ADDR_ENABLE;
10846 
10847                 if (cpu_isar_feature(aa64_mte, cpu)) {
10848                     switch (arg2 & TARGET_PR_MTE_TCF_MASK) {
10849                     case TARGET_PR_MTE_TCF_NONE:
10850                     case TARGET_PR_MTE_TCF_SYNC:
10851                     case TARGET_PR_MTE_TCF_ASYNC:
10852                         break;
10853                     default:
10854                         return -EINVAL;
10855                     }
10856 
10857                     /*
10858                      * Write PR_MTE_TCF to SCTLR_EL1[TCF0].
10859                      * Note that the syscall values are consistent with hw.
10860                      */
10861                     env->cp15.sctlr_el[1] =
10862                         deposit64(env->cp15.sctlr_el[1], 38, 2,
10863                                   arg2 >> TARGET_PR_MTE_TCF_SHIFT);
10864 
10865                     /*
10866                      * Write PR_MTE_TAG to GCR_EL1[Exclude].
10867                      * Note that the syscall uses an include mask,
10868                      * and hardware uses an exclude mask -- invert.
10869                      */
10870                     env->cp15.gcr_el1 =
10871                         deposit64(env->cp15.gcr_el1, 0, 16,
10872                                   ~arg2 >> TARGET_PR_MTE_TAG_SHIFT);
10873                     arm_rebuild_hflags(env);
10874                 }
10875                 return 0;
10876             }
10877         case TARGET_PR_GET_TAGGED_ADDR_CTRL:
10878             {
10879                 abi_long ret = 0;
10880                 CPUARMState *env = cpu_env;
10881                 ARMCPU *cpu = env_archcpu(env);
10882 
10883                 if (arg2 || arg3 || arg4 || arg5) {
10884                     return -TARGET_EINVAL;
10885                 }
10886                 if (env->tagged_addr_enable) {
10887                     ret |= TARGET_PR_TAGGED_ADDR_ENABLE;
10888                 }
10889                 if (cpu_isar_feature(aa64_mte, cpu)) {
10890                     /* See above. */
10891                     ret |= (extract64(env->cp15.sctlr_el[1], 38, 2)
10892                             << TARGET_PR_MTE_TCF_SHIFT);
10893                     ret = deposit64(ret, TARGET_PR_MTE_TAG_SHIFT, 16,
10894                                     ~env->cp15.gcr_el1);
10895                 }
10896                 return ret;
10897             }
10898 #endif /* AARCH64 */
10899         case PR_GET_SECCOMP:
10900         case PR_SET_SECCOMP:
10901             /* Disable seccomp to prevent the target disabling syscalls we
10902              * need. */
10903             return -TARGET_EINVAL;
10904         default:
10905             /* Most prctl options have no pointer arguments */
10906             return get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
10907         }
10908         break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        /* Arch-specific prctl (x86): handled entirely by the helper. */
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit values in aligned register pairs,
         * inserting a pad register; shift the offset halves down. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* On success ret is the string length written back to the guest. */
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    /* Shared implementation: both take a cap_user_header plus an
     * optional array of cap_user_data structs; direction differs. */
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        /* arg2 may legitimately be NULL (e.g. version probing). */
        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                /* capset: convert guest data into host byte order. */
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* capget: copy the results back out to the guest. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        /* Delegated entirely to the per-target do_sigaltstack helper. */
        return do_sigaltstack(arg1, arg2, cpu_env);
11027 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        /* sendfile(2): the optional offset pointer at arg3 holds an
           abi_long here (get/put_user_sal); the sendfile64 variant below
           uses a fixed 64-bit offset.  On success the updated offset is
           written back to the guest. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        /* As sendfile above, but the guest offset is always 64-bit. */
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork with vfork semantics: shared VM and the
           parent blocked until the child execs or exits. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /*
         * "unsigned getrlimit" entry point: query the host limit for the
         * translated resource and rewrite it in the guest's layout.
         */
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);

        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;

            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* truncate64: path in arg1; the 64-bit length is reassembled from
           arg2..arg4 by the per-target helper. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        /* fd in arg1; the 64-bit length is reassembled from arg2..arg4 by
           the per-target helper. */
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* stat64: stat the (possibly remapped, via path()) file, then
           convert the host struct stat into the guest's 64-bit layout. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        /* As stat64, but does not follow a trailing symlink. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        /* As stat64, but operates on an already-open fd (arg1). */
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat64/newfstatat: dirfd-relative stat; the AT_* flags in
           arg4 are passed through to the host unchanged. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Fall through to the fstatat-based emulation below only
                   when the host kernel itself lacks statx. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: emulate statx via fstatat and hand-fill the
               subset of struct statx fields that a struct stat provides;
               everything else (including stx_mask) is left zeroed. */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* lchown: chown that does not follow symlinks.  Legacy 16-bit
           guest uid/gid values are widened with low2high{uid,gid}. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* Legacy 16-bit id syscalls: host ids are narrowed back down. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11243     case TARGET_NR_getgroups:
11244         {
11245             int gidsetsize = arg1;
11246             target_id *target_grouplist;
11247             gid_t *grouplist;
11248             int i;
11249 
11250             grouplist = alloca(gidsetsize * sizeof(gid_t));
11251             ret = get_errno(getgroups(gidsetsize, grouplist));
11252             if (gidsetsize == 0)
11253                 return ret;
11254             if (!is_error(ret)) {
11255                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11256                 if (!target_grouplist)
11257                     return -TARGET_EFAULT;
11258                 for(i = 0;i < ret; i++)
11259                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11260                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11261             }
11262         }
11263         return ret;
11264     case TARGET_NR_setgroups:
11265         {
11266             int gidsetsize = arg1;
11267             target_id *target_grouplist;
11268             gid_t *grouplist = NULL;
11269             int i;
11270             if (gidsetsize) {
11271                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11272                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11273                 if (!target_grouplist) {
11274                     return -TARGET_EFAULT;
11275                 }
11276                 for (i = 0; i < gidsetsize; i++) {
11277                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11278                 }
11279                 unlock_user(target_grouplist, arg2, 0);
11280             }
11281             return get_errno(setgroups(gidsetsize, grouplist));
11282         }
    case TARGET_NR_fchown:
        /* fchown on an open fd; legacy 16-bit ids widened to host width. */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* dirfd-relative chown; arg5 carries the AT_* flags unchanged. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* setresuid: widen the legacy 16-bit ids and delegate to the
           sys_setresuid wrapper. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            /* getresuid: write real/effective/saved uids to the three
               guest pointers, narrowed to the legacy 16-bit width. */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* setresgid: widen the legacy 16-bit ids and delegate to the
           sys_setresgid wrapper.  Guard fixed: the original tested
           TARGET_NR_getresgid, so this case was compiled in or out based
           on the wrong syscall macro. */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            /* getresgid: write real/effective/saved gids to the three
               guest pointers, narrowed to the legacy 16-bit width. */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* chown with legacy 16-bit ids widened to host width. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid return the previous id; no widening here. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
11350 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit id variants: values pass through without width scaling. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
11363 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            /* getxuid returns two values: the effective uid in register
               a4 (IR_A4) and the real uid as the normal return value. */
            uid_t euid;
            euid=geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns two values — the effective gid in
       register a4 (IR_A4) and the real gid as the normal return value.
       The effective gid is now held in a gid_t (the original mistakenly
       declared it as uid_t). */
    case TARGET_NR_getxgid:
        {
            gid_t egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Only GSI_IEEE_FP_CONTROL is implemented; all other selectors
           fall through and report EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;

                /* Fold the live FPCR exception status bits into the
                   software control word before reporting it. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Only the two IEEE FP selectors below are implemented; all other
           selectors report EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                ((CPUAlphaState *)cpu_env)->swcr
                    = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= ((CPUArchState *)cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    /* Choose an si_code for the SIGFPE; when several
                       bits are set, the later tests take precedence. */
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr
                        = ((CPUArchState *)cpu_env)->pc;
                    queue_signal((CPUArchState *)cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            /* OSF-style sigprocmask: the mask is passed by value in arg2
               and the previous mask is the syscall's return value. */
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* On success, hand the old mask back as the result. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
11540 
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        /* 32-bit id variants: no narrowing, host values pass through. */
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        {
            /* 32-bit variant of getgroups: gids are full 32-bit values,
               only byte-swapped for the guest. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the guest-supplied count before it sizes an alloca;
               the kernel can never report more than NGROUPS_MAX groups
               and rejects negative sizes with EINVAL. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (gidsetsize == 0)
                return ret;
            if (!is_error(ret)) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        {
            /* 32-bit variant of setgroups: install arg1 supplementary
               groups read from the 32-bit guest array at arg2. */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            gid_t *grouplist;
            int i;

            /* Bound the guest-supplied count before it sizes an alloca;
               the kernel rejects counts above NGROUPS_MAX (and negative
               counts) with EINVAL. */
            if (gidsetsize < 0 || gidsetsize > NGROUPS_MAX) {
                return -TARGET_EINVAL;
            }
            grouplist = alloca(gidsetsize * sizeof(gid_t));
            target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
            if (!target_grouplist) {
                return -TARGET_EFAULT;
            }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
            unlock_user(target_grouplist, arg2, 0);
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit id syscalls: values pass through without width scaling. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            /* Write real/effective/saved uids to the three guest pointers. */
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            /* Write real/effective/saved gids to the three guest pointers. */
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* mincore(2): arg1/arg2 describe the guest address range and
               arg3 the guest vector receiving one status byte per page.
               NOTE(review): a lock failure on the range is reported as
               ENOMEM (not EFAULT), presumably mirroring the kernel's
               mincore error for unmapped ranges — confirm. */
            void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        /* posix_fadvise returns the error number directly rather than
           setting errno, hence the explicit negation/conversion. */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
11698 
#if TARGET_ABI_BITS == 32

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise returns the error number directly, not via errno. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* remap the guest's advice values onto the host POSIX_FADV_*
           constants (s390x numbering differs for the upper values) */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
11761 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* A straight passthrough may not be safe because qemu sometimes
           turns private file-backed mappings into anonymous mappings.
           This will break MADV_DONTNEED.
           This is a hint, so ignoring and returning success is ok.
           (The range in arg1/arg2 and the advice value in arg3 are
           intentionally not inspected at all.)  */
        return 0;
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * fcntl64: the three 64-bit lock commands need struct flock64
         * translation; everything else is delegated to do_fcntl.
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out struct flock64 differently. */
        if (!((CPUARMState *)cpu_env)->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (!ret) {
                ret = get_errno(safe_fcntl(arg1, cmd, &fl));
                if (ret == 0) {
                    /* Report the conflicting lock back to the guest. */
                    ret = copyto(arg3, &fl);
                }
            }
            break;
        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (!ret) {
                ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            }
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
11828 #ifdef TARGET_NR_readahead
11829     case TARGET_NR_readahead:
11830 #if TARGET_ABI_BITS == 32
11831         if (regpairs_aligned(cpu_env, num)) {
11832             arg2 = arg3;
11833             arg3 = arg4;
11834             arg4 = arg5;
11835         }
11836         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11837 #else
11838         ret = get_errno(readahead(arg1, arg2, arg3));
11839 #endif
11840         return ret;
11841 #endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /* Extended-attribute syscalls: lock the guest buffers, call the
     * matching host function, then unlock.  A NULL/zero value buffer is
     * legal (used to probe the required size), hence the "if (argN)"
     * guards before lock_user().  unlock_user()'s third argument is the
     * number of bytes to copy back to the guest: 0 for read-only
     * buffers, the buffer length for buffers the host call filled in.
     */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        /* Copy the name list written by the host back to the guest. */
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* arg4 bytes: the attribute value written by the host. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
      /* Per-architecture TLS register handling; targets without a
       * set_thread_area concept report ENOSYS.
       */
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
      ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low 8 bits of the TLS value to be clear. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k has no TLS register; the value is kept in TaskState. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; guests fall back or see ENOSYS. */
        return -TARGET_ENOSYS;
#endif
12046 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec first; a bad pointer yields EFAULT
         * without touching the host clock.
         */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* time64 ABI variant: the guest passes a __kernel_timespec. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy the result back in the guest's timespec layout. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* Propagate a copy-out failure instead of silently
             * returning success with an unwritten guest buffer.
             */
            if (host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* time64 variant: report EFAULT if the 64-bit timespec
             * cannot be written back to the guest.
             */
            if (host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the remaining-time output buffer when arg4 is
         * non-NULL; the host call updates it in place.
         */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* Same as clock_nanosleep, but using the 64-bit timespec ABI. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12156 
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
    case TARGET_NR_set_tid_address:
        /* g2h() translates the guest address so the host kernel clears
         * the futex word at thread exit in the right place.
         */
        return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
#endif

    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12186 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* arg3 points at an array of two timespecs (atime, mtime);
             * NULL means "set both to the current time".
             */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A NULL pathname is valid here: it means "operate on the
             * file referred to by the dirfd itself".
             */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* time64 variant: the guest array holds __kernel_timespec. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(sys_inotify_init());
        if (ret >= 0) {
            /* Register a translator so events read from this fd are
             * byte-swapped/converted to the guest layout.
             */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
    case TARGET_NR_inotify_init1:
        /* Translate guest O_NONBLOCK/O_CLOEXEC-style flags to host. */
        ret = get_errno(sys_inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Reject an unreadable pathname with EFAULT; previously a bad
         * guest pointer produced a NULL that path() would dereference.
         */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(sys_inotify_rm_watch(arg1, arg2));
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): "arg1 - 1" appears to back up one byte to
             * recover the leading '/' that the guest libc strips before
             * invoking the raw syscall, since the host libc mq_open()
             * requires a name starting with '/' — confirm against the
             * guest libc's mq_open wrapper.
             */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Same leading-'/' recovery as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* Fail with EFAULT on an unreadable message buffer instead
             * of passing a NULL host pointer onwards.
             */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    /* Release the buffer lock on the error path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            struct timespec ts;

            /* As mq_timedsend, but with the 64-bit timespec ABI.
             * Check the buffer lock and avoid leaking it on EFAULT.
             */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
12358 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): the buffer is locked VERIFY_READ although it
             * is written into; the writeback happens via unlock_user()
             * with length arg3 — confirm whether VERIFY_WRITE is wanted.
             */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    /* Release the buffer lock on the error path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* Only report prio on success (it is uninitialized on
             * failure), and propagate a copy-out fault.
             */
            if (!is_error(ret) && arg4 != 0 &&
                put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            struct timespec ts;
            unsigned int prio;

            /* As mq_timedreceive, but with the 64-bit timespec ABI.
             * Check the buffer lock, avoid leaking it on EFAULT, and
             * only write prio back on success.
             */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (!is_error(ret) && arg4 != 0 &&
                put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12412 
12413     /* Not implemented for now... */
12414 /*     case TARGET_NR_mq_notify: */
12415 /*         break; */
12416 
12417     case TARGET_NR_mq_getsetattr:
12418         {
12419             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12420             ret = 0;
12421             if (arg2 != 0) {
12422                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12423                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12424                                            &posix_mq_attr_out));
12425             } else if (arg3 != 0) {
12426                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12427             }
12428             if (ret == 0 && arg3 != 0) {
12429                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12430             }
12431         }
12432         return ret;
12433 #endif
12434 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* Direct passthrough: both arguments are host fds. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* arg2/arg4 are optional guest pointers to 64-bit offsets;
             * copy them in, let the host update them, copy them back.
             */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* lock_iovec() validates and maps the guest iovec array. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure cause via errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register a translator so the 8-byte counter reads/writes
             * are byte-swapped for the guest.
             */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate only the flag bits eventfd2 understands. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32
        /* 32-bit ABIs pass the two 64-bit values as register pairs. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32
#if defined(TARGET_MIPS)
        /* o32 MIPS inserts a pad register after the fd, shifting the
         * offset/nbytes register pairs up by one.
         */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32
        /* flags come second (arg2); offsets follow as register pairs. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old signalfd has no flags argument; reuse the signalfd4 path. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
12606 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: collect events into a host-side array,
         * then convert each one into the guest's epoll_event layout.
         */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents to keep the temporary allocation sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            /* pwait additionally swaps in a signal mask for the wait. */
            target_sigset_t *target_set;
            sigset_t _set, *set = &_set;

            if (arg5) {
                if (arg6 != sizeof(target_sigset_t)) {
                    ret = -TARGET_EINVAL;
                    break;
                }

                target_set = lock_user(VERIFY_READ, arg5,
                                       sizeof(target_sigset_t), 1);
                if (!target_set) {
                    ret = -TARGET_EFAULT;
                    break;
                }
                target_to_host_sigset(set, target_set);
                unlock_user(target_set, arg5, 0);
            } else {
                set = NULL;
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            /* Plain epoll_wait is epoll_pwait with no signal mask. */
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; convert each entry. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* Only build a "new limit" argument when the guest supplied one AND
         * the resource is not AS/DATA/STACK.  For those three, rnewp stays
         * NULL, so sys_prlimit64() below performs a read-only query even
         * though arg3 was given -- presumably because guest address-space
         * limits do not translate to the host; NOTE(review): confirm this
         * matches the setrlimit handling elsewhere in this file. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            /* Byte-swap the guest's 64-bit soft/hard limits to host order. */
            rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
            rnew.rlim_max = tswap64(target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        /* Old limits are only fetched when the guest passed an out pointer. */
        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            /* Copy the previous limits back out in guest byte order. */
            target_rold->rlim_cur = tswap64(rold.rlim_cur);
            target_rold->rlim_max = tswap64(rold.rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Let the host gethostname() write straight into the guest buffer
         * at arg1 (arg2 bytes); the host call reports any error itself. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!name) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(name, arg2));
        unlock_user(name, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Emulated compare-and-exchange: if the u32 at guest address arg6
         * equals arg2, store arg1 there; always return the value that was
         * read.  Not actually atomic -- should use start_exclusive from
         * main.c. */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* The guest address is unmapped: raise SIGSEGV in the guest. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal((CPUArchState *)cpu_env, info.si_signo,
                         QEMU_SI_FAULT, &info);
            /* Bail out here: the original code fell through and compared
             * (and returned) the uninitialized mem_value, which is
             * undefined behavior.  Return the poison value that was
             * previously assigned to ret but never used. */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        /* NOTE(review): no host memory barrier is emitted here; this
         * mirrors the kernel's own stub, but confirm it is adequate for
         * multi-threaded guests. */
        return 0;
#endif
12764 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Guest timer ids are indices into the g_posix_timers table;
         * a negative index means every slot is in use. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            /* A NULL sevp is passed through to timer_create() as-is;
             * otherwise convert the guest sigevent to host layout. */
            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                /* Hand the guest a tagged id (TIMER_MAGIC | index) so
                 * that get_timer_id() can later validate it. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
12800 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        /* get_timer_id() validates the TIMER_MAGIC tag and returns a
         * negative target errno on a bogus id. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* A new_value pointer is mandatory for settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Previous timer setting is copied back only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12828 
#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as TARGET_NR_timer_settime but the guest itimerspec uses
         * 64-bit time fields (the *_itimerspec64 converters). */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            /* A new_value pointer is mandatory for settime. */
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Previous timer setting is copied back only if requested. */
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12854 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied to the guest even when
             * timer_gettime() failed, i.e. while uninitialized; confirm
             * whether the error path should skip this write-back. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12877 
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        /* 64-bit-time variant of TARGET_NR_timer_gettime. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied to the guest even when
             * timer_gettime() failed, i.e. while uninitialized; confirm
             * whether the error path should skip this write-back. */
            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
12900 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        /* Map the tagged guest timer id to the host timer and pass the
         * overrun count straight back (it is a plain int). */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif
12916 
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Clear the slot so next_free_host_timer() can reuse it.
             * NOTE(review): cleared even if timer_delete() failed --
             * confirm that is intentional. */
            g_posix_timers[timerid] = 0;
        }
        return ret;
    }
#endif
12933 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* clockid passes through unchanged; only the TFD_* flags need
         * remapping, via the shared fcntl flag table. */
        return get_errno(timerfd_create(arg1,
                          target_to_host_bitmask(arg2, fcntl_flags_tbl)));
#endif
12939 
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy the current setting out in guest layout; a NULL arg2
             * skips the write-back rather than faulting. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12953 
#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit-time variant of TARGET_NR_timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy the current setting out in guest layout; a NULL arg2
             * skips the write-back rather than faulting. */
            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12967 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            /* Arm/disarm the timerfd: convert the guest's new setting
             * (a NULL arg3 passes NULL through to the host call), then
             * copy the previous setting back if the guest asked for it. */
            struct itimerspec its_new, its_old;
            struct itimerspec *p_new = NULL;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
12990 
#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit-time variant of TARGET_NR_timerfd_settime: same
             * shape, but using the *_itimerspec64 converters. */
            struct itimerspec its_new, its_old;
            struct itimerspec *p_new = NULL;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13013 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* which/who are plain integers; passed through unchanged. */
        return get_errno(ioprio_get(arg1, arg2));
#endif
13018 
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        /* which/who/ioprio are plain integers; passed through unchanged. */
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif
13023 
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* fd and nstype pass straight through to the host. */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        /* CLONE_* flags pass straight through to the host. */
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        /* All five arguments are integers/handles; no conversion done. */
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        /* Delegates entirely to the target-specific helper. */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* arg1 is the guest pointer to the name string; flags (arg2) are
         * passed through unchanged. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Drop any stale fd translator registered for this fd number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        /* cmd and flags pass straight through to the host. */
        return get_errno(membarrier(arg1, arg2));
#endif
13056 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            /* arg2/arg4 are optional guest pointers to 64-bit in/out file
             * offsets; NULL means "use and update the fd's own offset". */
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            /* On success the host updated the offsets in place; mirror the
             * new values back into guest memory. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13093 
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* Both unlocks run even on the EFAULT path; presumably
             * unlock_user() tolerates a NULL host pointer -- confirm. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
13110 
    default:
        /* Syscall not implemented by qemu-user: report it when "unimp"
         * logging is enabled and fail with ENOSYS. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
13114     }
13115     return ret;
13116 }
13117 
13118 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13119                     abi_long arg2, abi_long arg3, abi_long arg4,
13120                     abi_long arg5, abi_long arg6, abi_long arg7,
13121                     abi_long arg8)
13122 {
13123     CPUState *cpu = env_cpu(cpu_env);
13124     abi_long ret;
13125 
13126 #ifdef DEBUG_ERESTARTSYS
13127     /* Debug-only code for exercising the syscall-restart code paths
13128      * in the per-architecture cpu main loops: restart every syscall
13129      * the guest makes once before letting it through.
13130      */
13131     {
13132         static bool flag;
13133         flag = !flag;
13134         if (flag) {
13135             return -TARGET_ERESTARTSYS;
13136         }
13137     }
13138 #endif
13139 
13140     record_syscall_start(cpu, num, arg1,
13141                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13142 
13143     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13144         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13145     }
13146 
13147     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13148                       arg5, arg6, arg7, arg8);
13149 
13150     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13151         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13152                           arg3, arg4, arg5, arg6);
13153     }
13154 
13155     record_syscall_return(cpu, num, ret);
13156     return ret;
13157 }
13158