xref: /openbmc/qemu/linux-user/syscall.c (revision 24d87c18)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
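/*
 * As an illustration (not an exhaustive list): a typical glibc
 * pthread_create() issues clone() with roughly
 *     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *     CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *     CLONE_CHILD_CLEARTID
 * which contains all of CLONE_THREAD_FLAGS and otherwise only bits from
 * CLONE_OPTIONAL_THREAD_FLAGS, so it is classified as thread creation;
 * a plain fork()-style clone() passes none of CLONE_THREAD_FLAGS.
 */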
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
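/*
 * Local definitions of the traditional _syscallN() macros: each use
 * emits a small static wrapper that invokes the raw host syscall via
 * syscall(2), bypassing any per-call glibc wrapper.  The #undefs below
 * make sure no host-header versions of these macros remain in effect.
 */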
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
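/*
 * For example, the invocation above expands to (roughly):
 *     static int sys_gettid(void) { return syscall(__NR_gettid); }
 * i.e. a direct host gettid() syscall; glibc historically did not
 * provide a gettid() wrapper, hence the local definition.
 */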
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate getdents with getdents if the host has it.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(__NR_futex)
324 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
325           const struct timespec *,timeout,int *,uaddr2,int,val3)
326 #endif
327 #if defined(__NR_futex_time64)
328 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
329           const struct timespec *,timeout,int *,uaddr2,int,val3)
330 #endif
331 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
332 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
333           unsigned long *, user_mask_ptr);
334 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
335 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 /* sched_attr is not defined in glibc */
338 struct sched_attr {
339     uint32_t size;
340     uint32_t sched_policy;
341     uint64_t sched_flags;
342     int32_t sched_nice;
343     uint32_t sched_priority;
344     uint64_t sched_runtime;
345     uint64_t sched_deadline;
346     uint64_t sched_period;
347     uint32_t sched_util_min;
348     uint32_t sched_util_max;
349 };
350 #define __NR_sys_sched_getattr __NR_sched_getattr
351 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
352           unsigned int, size, unsigned int, flags);
353 #define __NR_sys_sched_setattr __NR_sched_setattr
354 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, flags);
356 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
357 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
358 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
359 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
360           const struct sched_param *, param);
361 #define __NR_sys_sched_getparam __NR_sched_getparam
362 _syscall2(int, sys_sched_getparam, pid_t, pid,
363           struct sched_param *, param);
364 #define __NR_sys_sched_setparam __NR_sched_setparam
365 _syscall2(int, sys_sched_setparam, pid_t, pid,
366           const struct sched_param *, param);
367 #define __NR_sys_getcpu __NR_getcpu
368 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
369 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
370           void *, arg);
371 _syscall2(int, capget, struct __user_cap_header_struct *, header,
372           struct __user_cap_data_struct *, data);
373 _syscall2(int, capset, struct __user_cap_header_struct *, header,
374           struct __user_cap_data_struct *, data);
375 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
376 _syscall2(int, ioprio_get, int, which, int, who)
377 #endif
378 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
379 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
380 #endif
381 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
382 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
383 #endif
384 
385 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
386 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
387           unsigned long, idx1, unsigned long, idx2)
388 #endif
389 
390 /*
391  * It is assumed that struct statx is architecture independent.
392  */
393 #if defined(TARGET_NR_statx) && defined(__NR_statx)
394 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
395           unsigned int, mask, struct target_statx *, statxbuf)
396 #endif
397 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
398 _syscall2(int, membarrier, int, cmd, int, flags)
399 #endif
400 
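/*
 * Translation table for open(2)/fcntl(2) file status flags.  Each entry
 * is { target mask, target bits, host mask, host bits }; the generic
 * bitmask translation helpers walk this table to convert a flag word in
 * either direction between guest and host encodings.
 */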
401 static const bitmask_transtbl fcntl_flags_tbl[] = {
402   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
403   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
404   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
405   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
406   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
407   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
408   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
409   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
410   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
411   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
412   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
413   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
414   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
415 #if defined(O_DIRECT)
416   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
417 #endif
418 #if defined(O_NOATIME)
419   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
420 #endif
421 #if defined(O_CLOEXEC)
422   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
423 #endif
424 #if defined(O_PATH)
425   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
426 #endif
427 #if defined(O_TMPFILE)
428   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
429 #endif
430   /* Don't terminate the list prematurely on 64-bit host+guest.  */
431 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
432   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
433 #endif
434   { 0, 0, 0, 0 }
435 };
436 
437 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
438 
439 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
440 #if defined(__NR_utimensat)
441 #define __NR_sys_utimensat __NR_utimensat
442 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
443           const struct timespec *,tsp,int,flags)
444 #else
445 static int sys_utimensat(int dirfd, const char *pathname,
446                          const struct timespec times[2], int flags)
447 {
448     errno = ENOSYS;
449     return -1;
450 }
451 #endif
452 #endif /* TARGET_NR_utimensat */
453 
454 #ifdef TARGET_NR_renameat2
455 #if defined(__NR_renameat2)
456 #define __NR_sys_renameat2 __NR_renameat2
457 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
458           const char *, new, unsigned int, flags)
459 #else
460 static int sys_renameat2(int oldfd, const char *old,
461                          int newfd, const char *new, int flags)
462 {
463     if (flags == 0) {
464         return renameat(oldfd, old, newfd, new);
465     }
466     errno = ENOSYS;
467     return -1;
468 }
469 #endif
470 #endif /* TARGET_NR_renameat2 */
471 
472 #ifdef CONFIG_INOTIFY
473 #include <sys/inotify.h>
474 #else
475 /* Userspace can usually survive runtime without inotify */
476 #undef TARGET_NR_inotify_init
477 #undef TARGET_NR_inotify_init1
478 #undef TARGET_NR_inotify_add_watch
479 #undef TARGET_NR_inotify_rm_watch
480 #endif /* CONFIG_INOTIFY  */
481 
482 #if defined(TARGET_NR_prlimit64)
483 #ifndef __NR_prlimit64
484 # define __NR_prlimit64 -1
485 #endif
486 #define __NR_sys_prlimit64 __NR_prlimit64
487 /* The glibc rlimit structure may not be that used by the underlying syscall */
488 struct host_rlimit64 {
489     uint64_t rlim_cur;
490     uint64_t rlim_max;
491 };
492 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
493           const struct host_rlimit64 *, new_limit,
494           struct host_rlimit64 *, old_limit)
495 #endif
496 
497 
498 #if defined(TARGET_NR_timer_create)
499 /* Maximum of 32 active POSIX timers allowed at any one time. */
500 static timer_t g_posix_timers[32] = { 0, } ;
501 
502 static inline int next_free_host_timer(void)
503 {
504     int k ;
505     /* FIXME: Does finding the next free slot require a lock? */
506     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
507         if (g_posix_timers[k] == 0) {
508             g_posix_timers[k] = (timer_t) 1;
509             return k;
510         }
511     }
512     return -1;
513 }
514 #endif
515 
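/*
 * Errno translation between host and target.  errnos.c.inc expands the
 * E() macro once for each errno whose value can differ between host and
 * target; any errno not listed there is assumed to share the same value
 * on both sides and is passed through unchanged.
 */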
516 static inline int host_to_target_errno(int host_errno)
517 {
518     switch (host_errno) {
519 #define E(X)  case X: return TARGET_##X;
520 #include "errnos.c.inc"
521 #undef E
522     default:
523         return host_errno;
524     }
525 }
526 
527 static inline int target_to_host_errno(int target_errno)
528 {
529     switch (target_errno) {
530 #define E(X)  case TARGET_##X: return X;
531 #include "errnos.c.inc"
532 #undef E
533     default:
534         return target_errno;
535     }
536 }
537 
538 abi_long get_errno(abi_long ret)
539 {
540     if (ret == -1)
541         return -host_to_target_errno(errno);
542     else
543         return ret;
544 }
545 
546 const char *target_strerror(int err)
547 {
548     if (err == QEMU_ERESTARTSYS) {
549         return "To be restarted";
550     }
551     if (err == QEMU_ESIGRETURN) {
552         return "Successful exit from sigreturn";
553     }
554 
555     return strerror(target_to_host_errno(err));
556 }
557 
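/*
 * Check that the tail of a guest-supplied structure is zeroed, in the
 * spirit of the kernel's check_zeroed_user(): when the guest passes a
 * structure larger (usize) than the size QEMU knows about (ksize), every
 * extra byte must be zero.  Returns 1 if so (or if there is no tail),
 * 0 if a non-zero byte is found, and -TARGET_EFAULT on access failure.
 */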
558 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
559 {
560     int i;
561     uint8_t b;
562     if (usize <= ksize) {
563         return 1;
564     }
565     for (i = ksize; i < usize; i++) {
566         if (get_user_u8(b, addr + i)) {
567             return -TARGET_EFAULT;
568         }
569         if (b != 0) {
570             return 0;
571         }
572     }
573     return 1;
574 }
575 
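/*
 * The safe_syscallN() macros generate wrappers around safe_syscall()
 * (see user/safe-syscall.h) for host syscalls that can block: they close
 * the race between checking for a pending guest signal and entering the
 * host syscall, so blocking calls can be cleanly interrupted and
 * restarted on behalf of the guest.
 */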
576 #define safe_syscall0(type, name) \
577 static type safe_##name(void) \
578 { \
579     return safe_syscall(__NR_##name); \
580 }
581 
582 #define safe_syscall1(type, name, type1, arg1) \
583 static type safe_##name(type1 arg1) \
584 { \
585     return safe_syscall(__NR_##name, arg1); \
586 }
587 
588 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
589 static type safe_##name(type1 arg1, type2 arg2) \
590 { \
591     return safe_syscall(__NR_##name, arg1, arg2); \
592 }
593 
594 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
595 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
596 { \
597     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
598 }
599 
600 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
601     type4, arg4) \
602 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
603 { \
604     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
605 }
606 
607 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
608     type4, arg4, type5, arg5) \
609 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
610     type5 arg5) \
611 { \
612     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
613 }
614 
615 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
616     type4, arg4, type5, arg5, type6, arg6) \
617 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
618     type5 arg5, type6 arg6) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
621 }
622 
623 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
624 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
625 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
626               int, flags, mode_t, mode)
627 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
628 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
629               struct rusage *, rusage)
630 #endif
631 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
632               int, options, struct rusage *, rusage)
633 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
634 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
635     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
636 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
637               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
638 #endif
639 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
640 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
641               struct timespec *, tsp, const sigset_t *, sigmask,
642               size_t, sigsetsize)
643 #endif
644 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
645               int, maxevents, int, timeout, const sigset_t *, sigmask,
646               size_t, sigsetsize)
647 #if defined(__NR_futex)
648 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
649               const struct timespec *,timeout,int *,uaddr2,int,val3)
650 #endif
651 #if defined(__NR_futex_time64)
652 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
653               const struct timespec *,timeout,int *,uaddr2,int,val3)
654 #endif
655 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
656 safe_syscall2(int, kill, pid_t, pid, int, sig)
657 safe_syscall2(int, tkill, int, tid, int, sig)
658 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
659 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
660 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
661 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
662               unsigned long, pos_l, unsigned long, pos_h)
663 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
664               unsigned long, pos_l, unsigned long, pos_h)
665 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
666               socklen_t, addrlen)
667 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
668               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
669 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
670               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
671 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
672 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
673 safe_syscall2(int, flock, int, fd, int, operation)
674 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
675 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
676               const struct timespec *, uts, size_t, sigsetsize)
677 #endif
678 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
679               int, flags)
680 #if defined(TARGET_NR_nanosleep)
681 safe_syscall2(int, nanosleep, const struct timespec *, req,
682               struct timespec *, rem)
683 #endif
684 #if defined(TARGET_NR_clock_nanosleep) || \
685     defined(TARGET_NR_clock_nanosleep_time64)
686 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
687               const struct timespec *, req, struct timespec *, rem)
688 #endif
689 #ifdef __NR_ipc
690 #ifdef __s390x__
691 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
692               void *, ptr)
693 #else
694 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
695               void *, ptr, long, fifth)
696 #endif
697 #endif
698 #ifdef __NR_msgsnd
699 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
700               int, flags)
701 #endif
702 #ifdef __NR_msgrcv
703 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
704               long, msgtype, int, flags)
705 #endif
706 #ifdef __NR_semtimedop
707 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
708               unsigned, nsops, const struct timespec *, timeout)
709 #endif
710 #if defined(TARGET_NR_mq_timedsend) || \
711     defined(TARGET_NR_mq_timedsend_time64)
712 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
713               size_t, len, unsigned, prio, const struct timespec *, timeout)
714 #endif
715 #if defined(TARGET_NR_mq_timedreceive) || \
716     defined(TARGET_NR_mq_timedreceive_time64)
717 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
718               size_t, len, unsigned *, prio, const struct timespec *, timeout)
719 #endif
720 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
721 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
722               int, outfd, loff_t *, poutoff, size_t, length,
723               unsigned int, flags)
724 #endif
725 
726 /* We do ioctl like this rather than via safe_syscall3 to preserve the
727  * "third argument might be integer or pointer or not present" behaviour of
728  * the libc function.
729  */
730 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
731 /* Similarly for fcntl. Note that callers must always:
732  *  - pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
733  *  - use the flock64 struct rather than unsuffixed flock
734  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
735  */
736 #ifdef __NR_fcntl64
737 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
738 #else
739 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
740 #endif
741 
742 static inline int host_to_target_sock_type(int host_type)
743 {
744     int target_type;
745 
746     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
747     case SOCK_DGRAM:
748         target_type = TARGET_SOCK_DGRAM;
749         break;
750     case SOCK_STREAM:
751         target_type = TARGET_SOCK_STREAM;
752         break;
753     default:
754         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
755         break;
756     }
757 
758 #if defined(SOCK_CLOEXEC)
759     if (host_type & SOCK_CLOEXEC) {
760         target_type |= TARGET_SOCK_CLOEXEC;
761     }
762 #endif
763 
764 #if defined(SOCK_NONBLOCK)
765     if (host_type & SOCK_NONBLOCK) {
766         target_type |= TARGET_SOCK_NONBLOCK;
767     }
768 #endif
769 
770     return target_type;
771 }
772 
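/*
 * Emulated program break state: target_original_brk is the break set at
 * load time, target_brk is the current break, and brk_page is the
 * host-page-aligned end of the region currently reserved for the guest
 * heap.
 */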
773 static abi_ulong target_brk;
774 static abi_ulong target_original_brk;
775 static abi_ulong brk_page;
776 
777 void target_set_brk(abi_ulong new_brk)
778 {
779     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
780     brk_page = HOST_PAGE_ALIGN(target_brk);
781 }
782 
783 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
784 #define DEBUGF_BRK(message, args...)
785 
786 /* do_brk() must return target values and target errnos. */
787 abi_long do_brk(abi_ulong new_brk)
788 {
789     abi_long mapped_addr;
790     abi_ulong new_alloc_size;
791 
792     /* brk pointers are always untagged */
793 
794     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
795 
796     if (!new_brk) {
797         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
798         return target_brk;
799     }
800     if (new_brk < target_original_brk) {
801         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
802                    target_brk);
803         return target_brk;
804     }
805 
806     /* If the new brk is less than the highest page reserved to the
807      * target heap allocation, set it and we're almost done...  */
808     if (new_brk <= brk_page) {
809         /* Heap contents are initialized to zero, as for anonymous
810          * mapped pages.  */
811         if (new_brk > target_brk) {
812             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
813         }
814         target_brk = new_brk;
815         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
816         return target_brk;
817     }
818 
819     /* We need to allocate more memory after the brk... Note that
820      * we don't use MAP_FIXED because that will map over the top of
821      * any existing mapping (like the one with the host libc or qemu
822      * itself); instead we treat "mapped but at wrong address" as
823      * a failure and unmap again.
824      */
825     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
826     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
827                                         PROT_READ|PROT_WRITE,
828                                         MAP_ANON|MAP_PRIVATE, 0, 0));
829 
830     if (mapped_addr == brk_page) {
831         /* Heap contents are initialized to zero, as for anonymous
832          * mapped pages.  Technically the new pages are already
833          * initialized to zero since they *are* anonymous mapped
834          * pages, however we have to take care with the contents that
835          * come from the remaining part of the previous page: it may
836          * contain garbage data left over from earlier heap usage (the
837          * heap may have grown and then shrunk).  */
838         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
839 
840         target_brk = new_brk;
841         brk_page = HOST_PAGE_ALIGN(target_brk);
842         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
843             target_brk);
844         return target_brk;
845     } else if (mapped_addr != -1) {
846         /* Mapped but at wrong address, meaning there wasn't actually
847          * enough space for this brk.
848          */
849         target_munmap(mapped_addr, new_alloc_size);
850         mapped_addr = -1;
851         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
852     }
853     else {
854         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
855     }
856 
857 #if defined(TARGET_ALPHA)
858     /* We (partially) emulate OSF/1 on Alpha, which requires we
859        return a proper errno, not an unchanged brk value.  */
860     return -TARGET_ENOMEM;
861 #endif
862     /* For everything else, return the previous break. */
863     return target_brk;
864 }
865 
866 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
867     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
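/*
 * fd_set conversion helpers.  A guest fd_set is an array of abi_ulong
 * bitmap words whose width and byte order may differ from the host's,
 * so the bits are transferred one at a time rather than copying the
 * buffer wholesale.
 */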
868 static inline abi_long copy_from_user_fdset(fd_set *fds,
869                                             abi_ulong target_fds_addr,
870                                             int n)
871 {
872     int i, nw, j, k;
873     abi_ulong b, *target_fds;
874 
875     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
876     if (!(target_fds = lock_user(VERIFY_READ,
877                                  target_fds_addr,
878                                  sizeof(abi_ulong) * nw,
879                                  1)))
880         return -TARGET_EFAULT;
881 
882     FD_ZERO(fds);
883     k = 0;
884     for (i = 0; i < nw; i++) {
885         /* grab the abi_ulong */
886         __get_user(b, &target_fds[i]);
887         for (j = 0; j < TARGET_ABI_BITS; j++) {
888             /* check the bit inside the abi_ulong */
889             if ((b >> j) & 1)
890                 FD_SET(k, fds);
891             k++;
892         }
893     }
894 
895     unlock_user(target_fds, target_fds_addr, 0);
896 
897     return 0;
898 }
899 
900 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
901                                                  abi_ulong target_fds_addr,
902                                                  int n)
903 {
904     if (target_fds_addr) {
905         if (copy_from_user_fdset(fds, target_fds_addr, n))
906             return -TARGET_EFAULT;
907         *fds_ptr = fds;
908     } else {
909         *fds_ptr = NULL;
910     }
911     return 0;
912 }
913 
914 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
915                                           const fd_set *fds,
916                                           int n)
917 {
918     int i, nw, j, k;
919     abi_long v;
920     abi_ulong *target_fds;
921 
922     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
923     if (!(target_fds = lock_user(VERIFY_WRITE,
924                                  target_fds_addr,
925                                  sizeof(abi_ulong) * nw,
926                                  0)))
927         return -TARGET_EFAULT;
928 
929     k = 0;
930     for (i = 0; i < nw; i++) {
931         v = 0;
932         for (j = 0; j < TARGET_ABI_BITS; j++) {
933             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
934             k++;
935         }
936         __put_user(v, &target_fds[i]);
937     }
938 
939     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
940 
941     return 0;
942 }
943 #endif
944 
945 #if defined(__alpha__)
946 #define HOST_HZ 1024
947 #else
948 #define HOST_HZ 100
949 #endif
950 
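/*
 * clock_t values (e.g. from times()) are expressed in ticks of the
 * kernel's HZ.  When host and target HZ differ they must be rescaled:
 * for example, with HOST_HZ 1024 (Alpha host) and a target HZ of 100,
 * 2048 host ticks are reported to the guest as 200 ticks.
 */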
951 static inline abi_long host_to_target_clock_t(long ticks)
952 {
953 #if HOST_HZ == TARGET_HZ
954     return ticks;
955 #else
956     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
957 #endif
958 }
959 
960 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
961                                              const struct rusage *rusage)
962 {
963     struct target_rusage *target_rusage;
964 
965     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
966         return -TARGET_EFAULT;
967     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
968     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
969     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
970     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
971     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
972     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
973     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
974     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
975     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
976     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
977     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
978     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
979     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
980     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
981     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
982     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
983     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
984     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
985     unlock_user_struct(target_rusage, target_addr, 1);
986 
987     return 0;
988 }
989 
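/*
 * rlimit value conversion.  The target's RLIM_INFINITY encoding is
 * mapped to the host's and back, and any value that does not fit in the
 * destination type is treated as infinity rather than being truncated.
 */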
990 #ifdef TARGET_NR_setrlimit
991 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
992 {
993     abi_ulong target_rlim_swap;
994     rlim_t result;
995 
996     target_rlim_swap = tswapal(target_rlim);
997     if (target_rlim_swap == TARGET_RLIM_INFINITY)
998         return RLIM_INFINITY;
999 
1000     result = target_rlim_swap;
1001     if (target_rlim_swap != (rlim_t)result)
1002         return RLIM_INFINITY;
1003 
1004     return result;
1005 }
1006 #endif
1007 
1008 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1009 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1010 {
1011     abi_ulong target_rlim_swap;
1012     abi_ulong result;
1013 
1014     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1015         target_rlim_swap = TARGET_RLIM_INFINITY;
1016     else
1017         target_rlim_swap = rlim;
1018     result = tswapal(target_rlim_swap);
1019 
1020     return result;
1021 }
1022 #endif
1023 
1024 static inline int target_to_host_resource(int code)
1025 {
1026     switch (code) {
1027     case TARGET_RLIMIT_AS:
1028         return RLIMIT_AS;
1029     case TARGET_RLIMIT_CORE:
1030         return RLIMIT_CORE;
1031     case TARGET_RLIMIT_CPU:
1032         return RLIMIT_CPU;
1033     case TARGET_RLIMIT_DATA:
1034         return RLIMIT_DATA;
1035     case TARGET_RLIMIT_FSIZE:
1036         return RLIMIT_FSIZE;
1037     case TARGET_RLIMIT_LOCKS:
1038         return RLIMIT_LOCKS;
1039     case TARGET_RLIMIT_MEMLOCK:
1040         return RLIMIT_MEMLOCK;
1041     case TARGET_RLIMIT_MSGQUEUE:
1042         return RLIMIT_MSGQUEUE;
1043     case TARGET_RLIMIT_NICE:
1044         return RLIMIT_NICE;
1045     case TARGET_RLIMIT_NOFILE:
1046         return RLIMIT_NOFILE;
1047     case TARGET_RLIMIT_NPROC:
1048         return RLIMIT_NPROC;
1049     case TARGET_RLIMIT_RSS:
1050         return RLIMIT_RSS;
1051     case TARGET_RLIMIT_RTPRIO:
1052         return RLIMIT_RTPRIO;
1053 #ifdef RLIMIT_RTTIME
1054     case TARGET_RLIMIT_RTTIME:
1055         return RLIMIT_RTTIME;
1056 #endif
1057     case TARGET_RLIMIT_SIGPENDING:
1058         return RLIMIT_SIGPENDING;
1059     case TARGET_RLIMIT_STACK:
1060         return RLIMIT_STACK;
1061     default:
1062         return code;
1063     }
1064 }
1065 
1066 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1067                                               abi_ulong target_tv_addr)
1068 {
1069     struct target_timeval *target_tv;
1070 
1071     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1072         return -TARGET_EFAULT;
1073     }
1074 
1075     __get_user(tv->tv_sec, &target_tv->tv_sec);
1076     __get_user(tv->tv_usec, &target_tv->tv_usec);
1077 
1078     unlock_user_struct(target_tv, target_tv_addr, 0);
1079 
1080     return 0;
1081 }
1082 
1083 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1084                                             const struct timeval *tv)
1085 {
1086     struct target_timeval *target_tv;
1087 
1088     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1089         return -TARGET_EFAULT;
1090     }
1091 
1092     __put_user(tv->tv_sec, &target_tv->tv_sec);
1093     __put_user(tv->tv_usec, &target_tv->tv_usec);
1094 
1095     unlock_user_struct(target_tv, target_tv_addr, 1);
1096 
1097     return 0;
1098 }
1099 
1100 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1101 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1102                                                 abi_ulong target_tv_addr)
1103 {
1104     struct target__kernel_sock_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __get_user(tv->tv_sec, &target_tv->tv_sec);
1111     __get_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 0);
1114 
1115     return 0;
1116 }
1117 #endif
1118 
1119 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1120                                               const struct timeval *tv)
1121 {
1122     struct target__kernel_sock_timeval *target_tv;
1123 
1124     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1125         return -TARGET_EFAULT;
1126     }
1127 
1128     __put_user(tv->tv_sec, &target_tv->tv_sec);
1129     __put_user(tv->tv_usec, &target_tv->tv_usec);
1130 
1131     unlock_user_struct(target_tv, target_tv_addr, 1);
1132 
1133     return 0;
1134 }
1135 
1136 #if defined(TARGET_NR_futex) || \
1137     defined(TARGET_NR_rt_sigtimedwait) || \
1138     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1139     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1140     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1141     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1142     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1143     defined(TARGET_NR_timer_settime) || \
1144     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1145 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1146                                                abi_ulong target_addr)
1147 {
1148     struct target_timespec *target_ts;
1149 
1150     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1151         return -TARGET_EFAULT;
1152     }
1153     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1154     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1155     unlock_user_struct(target_ts, target_addr, 0);
1156     return 0;
1157 }
1158 #endif
1159 
1160 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1161     defined(TARGET_NR_timer_settime64) || \
1162     defined(TARGET_NR_mq_timedsend_time64) || \
1163     defined(TARGET_NR_mq_timedreceive_time64) || \
1164     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1165     defined(TARGET_NR_clock_nanosleep_time64) || \
1166     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1167     defined(TARGET_NR_utimensat) || \
1168     defined(TARGET_NR_utimensat_time64) || \
1169     defined(TARGET_NR_semtimedop_time64) || \
1170     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1171 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1172                                                  abi_ulong target_addr)
1173 {
1174     struct target__kernel_timespec *target_ts;
1175 
1176     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1177         return -TARGET_EFAULT;
1178     }
1179     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1180     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1181     /* in 32bit mode, this drops the padding */
1182     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1183     unlock_user_struct(target_ts, target_addr, 0);
1184     return 0;
1185 }
1186 #endif
1187 
1188 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1189                                                struct timespec *host_ts)
1190 {
1191     struct target_timespec *target_ts;
1192 
1193     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1194         return -TARGET_EFAULT;
1195     }
1196     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1197     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1198     unlock_user_struct(target_ts, target_addr, 1);
1199     return 0;
1200 }
1201 
1202 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1203                                                  struct timespec *host_ts)
1204 {
1205     struct target__kernel_timespec *target_ts;
1206 
1207     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1208         return -TARGET_EFAULT;
1209     }
1210     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1211     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1212     unlock_user_struct(target_ts, target_addr, 1);
1213     return 0;
1214 }
1215 
1216 #if defined(TARGET_NR_gettimeofday)
1217 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1218                                              struct timezone *tz)
1219 {
1220     struct target_timezone *target_tz;
1221 
1222     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1223         return -TARGET_EFAULT;
1224     }
1225 
1226     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1227     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1228 
1229     unlock_user_struct(target_tz, target_tz_addr, 1);
1230 
1231     return 0;
1232 }
1233 #endif
1234 
1235 #if defined(TARGET_NR_settimeofday)
1236 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1237                                                abi_ulong target_tz_addr)
1238 {
1239     struct target_timezone *target_tz;
1240 
1241     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1242         return -TARGET_EFAULT;
1243     }
1244 
1245     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1246     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1247 
1248     unlock_user_struct(target_tz, target_tz_addr, 0);
1249 
1250     return 0;
1251 }
1252 #endif
1253 
1254 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1255 #include <mqueue.h>
1256 
1257 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1258                                               abi_ulong target_mq_attr_addr)
1259 {
1260     struct target_mq_attr *target_mq_attr;
1261 
1262     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1263                           target_mq_attr_addr, 1))
1264         return -TARGET_EFAULT;
1265 
1266     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1267     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1268     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1269     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1270 
1271     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1272 
1273     return 0;
1274 }
1275 
1276 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1277                                             const struct mq_attr *attr)
1278 {
1279     struct target_mq_attr *target_mq_attr;
1280 
1281     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1282                           target_mq_attr_addr, 0))
1283         return -TARGET_EFAULT;
1284 
1285     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1286     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1287     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1288     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1289 
1290     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1291 
1292     return 0;
1293 }
1294 #endif
1295 
1296 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1297 /* do_select() must return target values and target errnos. */
1298 static abi_long do_select(int n,
1299                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1300                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1301 {
1302     fd_set rfds, wfds, efds;
1303     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1304     struct timeval tv;
1305     struct timespec ts, *ts_ptr;
1306     abi_long ret;
1307 
1308     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1309     if (ret) {
1310         return ret;
1311     }
1312     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1313     if (ret) {
1314         return ret;
1315     }
1316     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1317     if (ret) {
1318         return ret;
1319     }
1320 
1321     if (target_tv_addr) {
1322         if (copy_from_user_timeval(&tv, target_tv_addr))
1323             return -TARGET_EFAULT;
1324         ts.tv_sec = tv.tv_sec;
1325         ts.tv_nsec = tv.tv_usec * 1000;
1326         ts_ptr = &ts;
1327     } else {
1328         ts_ptr = NULL;
1329     }
1330 
1331     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1332                                   ts_ptr, NULL));
1333 
1334     if (!is_error(ret)) {
1335         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1336             return -TARGET_EFAULT;
1337         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1338             return -TARGET_EFAULT;
1339         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1340             return -TARGET_EFAULT;
1341 
1342         if (target_tv_addr) {
1343             tv.tv_sec = ts.tv_sec;
1344             tv.tv_usec = ts.tv_nsec / 1000;
1345             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1346                 return -TARGET_EFAULT;
1347             }
1348         }
1349     }
1350 
1351     return ret;
1352 }
1353 
1354 #if defined(TARGET_WANT_OLD_SYS_SELECT)
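/*
 * The old-style select(2) ABI (e.g. original i386) passes a single
 * pointer to a block containing all five arguments; unpack them and
 * forward to do_select().
 */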
1355 static abi_long do_old_select(abi_ulong arg1)
1356 {
1357     struct target_sel_arg_struct *sel;
1358     abi_ulong inp, outp, exp, tvp;
1359     long nsel;
1360 
1361     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1362         return -TARGET_EFAULT;
1363     }
1364 
1365     nsel = tswapal(sel->n);
1366     inp = tswapal(sel->inp);
1367     outp = tswapal(sel->outp);
1368     exp = tswapal(sel->exp);
1369     tvp = tswapal(sel->tvp);
1370 
1371     unlock_user_struct(sel, arg1, 0);
1372 
1373     return do_select(nsel, inp, outp, exp, tvp);
1374 }
1375 #endif
1376 #endif
1377 
1378 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1379 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1380                             abi_long arg4, abi_long arg5, abi_long arg6,
1381                             bool time64)
1382 {
1383     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1384     fd_set rfds, wfds, efds;
1385     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1386     struct timespec ts, *ts_ptr;
1387     abi_long ret;
1388 
1389     /*
1390      * The 6th arg is actually two args smashed together,
1391      * so we cannot use the C library.
1392      */
1393     struct {
1394         sigset_t *set;
1395         size_t size;
1396     } sig, *sig_ptr;
1397 
1398     abi_ulong arg_sigset, arg_sigsize, *arg7;
1399 
1400     n = arg1;
1401     rfd_addr = arg2;
1402     wfd_addr = arg3;
1403     efd_addr = arg4;
1404     ts_addr = arg5;
1405 
1406     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1407     if (ret) {
1408         return ret;
1409     }
1410     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1411     if (ret) {
1412         return ret;
1413     }
1414     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1415     if (ret) {
1416         return ret;
1417     }
1418 
1419     /*
1420      * This takes a timespec, and not a timeval, so we cannot
1421      * use the do_select() helper ...
1422      */
1423     if (ts_addr) {
1424         if (time64) {
1425             if (target_to_host_timespec64(&ts, ts_addr)) {
1426                 return -TARGET_EFAULT;
1427             }
1428         } else {
1429             if (target_to_host_timespec(&ts, ts_addr)) {
1430                 return -TARGET_EFAULT;
1431             }
1432         }
1433         ts_ptr = &ts;
1434     } else {
1435         ts_ptr = NULL;
1436     }
1437 
1438     /* Extract the two packed args for the sigset */
1439     sig_ptr = NULL;
1440     if (arg6) {
1441         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1442         if (!arg7) {
1443             return -TARGET_EFAULT;
1444         }
1445         arg_sigset = tswapal(arg7[0]);
1446         arg_sigsize = tswapal(arg7[1]);
1447         unlock_user(arg7, arg6, 0);
1448 
1449         if (arg_sigset) {
1450             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1451             if (ret != 0) {
1452                 return ret;
1453             }
1454             sig_ptr = &sig;
1455             sig.size = SIGSET_T_SIZE;
1456         }
1457     }
1458 
1459     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1460                                   ts_ptr, sig_ptr));
1461 
1462     if (sig_ptr) {
1463         finish_sigsuspend_mask(ret);
1464     }
1465 
1466     if (!is_error(ret)) {
1467         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1468             return -TARGET_EFAULT;
1469         }
1470         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1471             return -TARGET_EFAULT;
1472         }
1473         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1474             return -TARGET_EFAULT;
1475         }
1476         if (time64) {
1477             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1478                 return -TARGET_EFAULT;
1479             }
1480         } else {
1481             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1482                 return -TARGET_EFAULT;
1483             }
1484         }
1485     }
1486     return ret;
1487 }
1488 #endif
1489 
1490 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1491     defined(TARGET_NR_ppoll_time64)
1492 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1493                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1494 {
1495     struct target_pollfd *target_pfd;
1496     unsigned int nfds = arg2;
1497     struct pollfd *pfd;
1498     unsigned int i;
1499     abi_long ret;
1500 
1501     pfd = NULL;
1502     target_pfd = NULL;
1503     if (nfds) {
1504         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1505             return -TARGET_EINVAL;
1506         }
1507         target_pfd = lock_user(VERIFY_WRITE, arg1,
1508                                sizeof(struct target_pollfd) * nfds, 1);
1509         if (!target_pfd) {
1510             return -TARGET_EFAULT;
1511         }
1512 
1513         pfd = alloca(sizeof(struct pollfd) * nfds);
1514         for (i = 0; i < nfds; i++) {
1515             pfd[i].fd = tswap32(target_pfd[i].fd);
1516             pfd[i].events = tswap16(target_pfd[i].events);
1517         }
1518     }
1519     if (ppoll) {
1520         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1521         sigset_t *set = NULL;
1522 
1523         if (arg3) {
1524             if (time64) {
1525                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1526                     unlock_user(target_pfd, arg1, 0);
1527                     return -TARGET_EFAULT;
1528                 }
1529             } else {
1530                 if (target_to_host_timespec(timeout_ts, arg3)) {
1531                     unlock_user(target_pfd, arg1, 0);
1532                     return -TARGET_EFAULT;
1533                 }
1534             }
1535         } else {
1536             timeout_ts = NULL;
1537         }
1538 
1539         if (arg4) {
1540             ret = process_sigsuspend_mask(&set, arg4, arg5);
1541             if (ret != 0) {
1542                 unlock_user(target_pfd, arg1, 0);
1543                 return ret;
1544             }
1545         }
1546 
1547         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1548                                    set, SIGSET_T_SIZE));
1549 
1550         if (set) {
1551             finish_sigsuspend_mask(ret);
1552         }
1553         if (!is_error(ret) && arg3) {
1554             if (time64) {
1555                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1556                     return -TARGET_EFAULT;
1557                 }
1558             } else {
1559                 if (host_to_target_timespec(arg3, timeout_ts)) {
1560                     return -TARGET_EFAULT;
1561                 }
1562             }
1563         }
1564     } else {
1565           struct timespec ts, *pts;
1566         struct timespec ts, *pts;
1567 
1568         if (arg3 >= 0) {
1569             /* Convert ms to secs, ns */
1570             ts.tv_sec = arg3 / 1000;
1571             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1572             pts = &ts;
1573         } else {
1574             /* A negative poll() timeout means "infinite" */
1575             pts = NULL;
1576         }
1577         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1578 
1579     if (!is_error(ret)) {
1580         for (i = 0; i < nfds; i++) {
1581             target_pfd[i].revents = tswap16(pfd[i].revents);
1582         }
1583     }
1584     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1585     return ret;
1586 }
1587 #endif
1588 
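/*
 * Thin wrapper around the host pipe2(); returns -ENOSYS when QEMU was
 * built without pipe2 support (CONFIG_PIPE2).
 */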
1589 static abi_long do_pipe2(int host_pipe[], int flags)
1590 {
1591 #ifdef CONFIG_PIPE2
1592     return pipe2(host_pipe, flags);
1593 #else
1594     return -ENOSYS;
1595 #endif
1596 }
1597 
1598 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1599                         int flags, int is_pipe2)
1600 {
1601     int host_pipe[2];
1602     abi_long ret;
1603     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1604 
1605     if (is_error(ret))
1606         return get_errno(ret);
1607 
1608     /* Several targets have special calling conventions for the original
1609        pipe syscall, but these were not carried over to the pipe2 syscall.  */
1610     if (!is_pipe2) {
1611 #if defined(TARGET_ALPHA)
1612         cpu_env->ir[IR_A4] = host_pipe[1];
1613         return host_pipe[0];
1614 #elif defined(TARGET_MIPS)
1615         cpu_env->active_tc.gpr[3] = host_pipe[1];
1616         return host_pipe[0];
1617 #elif defined(TARGET_SH4)
1618         cpu_env->gregs[1] = host_pipe[1];
1619         return host_pipe[0];
1620 #elif defined(TARGET_SPARC)
1621         cpu_env->regwptr[1] = host_pipe[1];
1622         return host_pipe[0];
1623 #endif
1624     }
1625 
1626     if (put_user_s32(host_pipe[0], pipedes)
1627         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1628         return -TARGET_EFAULT;
1629     return get_errno(ret);
1630 }
1631 
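/*
 * Copy a guest ip_mreq/ip_mreqn multicast request into the host
 * structure, byte-swapping the interface index when the longer
 * ip_mreqn layout is supplied.
 */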
1632 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1633                                               abi_ulong target_addr,
1634                                               socklen_t len)
1635 {
1636     struct target_ip_mreqn *target_smreqn;
1637 
1638     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1639     if (!target_smreqn)
1640         return -TARGET_EFAULT;
1641     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1642     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1643     if (len == sizeof(struct target_ip_mreqn))
1644         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1645     unlock_user(target_smreqn, target_addr, 0);
1646 
1647     return 0;
1648 }
1649 
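/*
 * Convert a guest sockaddr into host format.  A per-fd translation hook
 * takes precedence when one is registered; otherwise this fixes up
 * unterminated AF_UNIX sun_path lengths and byte-swaps the AF_NETLINK
 * and AF_PACKET specific fields.
 */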
1650 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1651                                                abi_ulong target_addr,
1652                                                socklen_t len)
1653 {
1654     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1655     sa_family_t sa_family;
1656     struct target_sockaddr *target_saddr;
1657 
1658     if (fd_trans_target_to_host_addr(fd)) {
1659         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1660     }
1661 
1662     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1663     if (!target_saddr)
1664         return -TARGET_EFAULT;
1665 
1666     sa_family = tswap16(target_saddr->sa_family);
1667 
1668     /* The caller might send an incomplete sun_path; sun_path
1669      * must be terminated by \0 (see the manual page), but
1670      * unfortunately it is quite common to specify the sockaddr_un
1671      * length as "strlen(x->sun_path)" when it should be
1672      * "strlen(...) + 1". We fix that up here if needed.
1673      * The Linux kernel has a similar workaround.
1674      */
1675 
1676     if (sa_family == AF_UNIX) {
1677         if (len < unix_maxlen && len > 0) {
1678             char *cp = (char *)target_saddr;
1679 
1680             if (cp[len - 1] && !cp[len])
1681                 len++;
1682         }
1683         if (len > unix_maxlen)
1684             len = unix_maxlen;
1685     }
1686 
1687     memcpy(addr, target_saddr, len);
1688     addr->sa_family = sa_family;
1689     if (sa_family == AF_NETLINK) {
1690         struct sockaddr_nl *nladdr;
1691 
1692         nladdr = (struct sockaddr_nl *)addr;
1693         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1694         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1695     } else if (sa_family == AF_PACKET) {
1696         struct target_sockaddr_ll *lladdr;
1697 
1698         lladdr = (struct target_sockaddr_ll *)addr;
1699         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1700         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1701     }
1702     unlock_user(target_saddr, target_addr, 0);
1703 
1704     return 0;
1705 }
1706 
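/*
 * Convert a host sockaddr back into guest format, byte-swapping the
 * address family and the AF_NETLINK, AF_PACKET and AF_INET6 specific
 * fields.
 */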
1707 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1708                                                struct sockaddr *addr,
1709                                                socklen_t len)
1710 {
1711     struct target_sockaddr *target_saddr;
1712 
1713     if (len == 0) {
1714         return 0;
1715     }
1716     assert(addr);
1717 
1718     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1719     if (!target_saddr)
1720         return -TARGET_EFAULT;
1721     memcpy(target_saddr, addr, len);
1722     if (len >= offsetof(struct target_sockaddr, sa_family) +
1723         sizeof(target_saddr->sa_family)) {
1724         target_saddr->sa_family = tswap16(addr->sa_family);
1725     }
1726     if (addr->sa_family == AF_NETLINK &&
1727         len >= sizeof(struct target_sockaddr_nl)) {
1728         struct target_sockaddr_nl *target_nl =
1729                (struct target_sockaddr_nl *)target_saddr;
1730         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1731         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1732     } else if (addr->sa_family == AF_PACKET) {
1733         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1734         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1735         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1736     } else if (addr->sa_family == AF_INET6 &&
1737                len >= sizeof(struct target_sockaddr_in6)) {
1738         struct target_sockaddr_in6 *target_in6 =
1739                (struct target_sockaddr_in6 *)target_saddr;
1740         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1741     }
1742     unlock_user(target_saddr, target_addr, len);
1743 
1744     return 0;
1745 }
1746 
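/*
 * Convert guest ancillary data (control messages) into the host msghdr.
 * SCM_RIGHTS and SCM_CREDENTIALS payloads are converted element by
 * element; other payload types are copied verbatim with a LOG_UNIMP
 * warning.
 */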
1747 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1748                                            struct target_msghdr *target_msgh)
1749 {
1750     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1751     abi_long msg_controllen;
1752     abi_ulong target_cmsg_addr;
1753     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1754     socklen_t space = 0;
1755 
1756     msg_controllen = tswapal(target_msgh->msg_controllen);
1757     if (msg_controllen < sizeof (struct target_cmsghdr))
1758         goto the_end;
1759     target_cmsg_addr = tswapal(target_msgh->msg_control);
1760     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1761     target_cmsg_start = target_cmsg;
1762     if (!target_cmsg)
1763         return -TARGET_EFAULT;
1764 
1765     while (cmsg && target_cmsg) {
1766         void *data = CMSG_DATA(cmsg);
1767         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1768 
1769         int len = tswapal(target_cmsg->cmsg_len)
1770             - sizeof(struct target_cmsghdr);
1771 
1772         space += CMSG_SPACE(len);
1773         if (space > msgh->msg_controllen) {
1774             space -= CMSG_SPACE(len);
1775             /* This is a QEMU bug, since we allocated the payload
1776              * area ourselves (unlike overflow in host-to-target
1777              * conversion, which is just the guest giving us a buffer
1778              * that's too small). It can't happen for the payload types
1779              * we currently support; if it becomes an issue in future
1780              * we would need to improve our allocation strategy to
1781              * something more intelligent than "twice the size of the
1782              * target buffer we're reading from".
1783              */
1784             qemu_log_mask(LOG_UNIMP,
1785                           ("Unsupported ancillary data %d/%d: "
1786                            "unhandled msg size\n"),
1787                           tswap32(target_cmsg->cmsg_level),
1788                           tswap32(target_cmsg->cmsg_type));
1789             break;
1790         }
1791 
1792         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1793             cmsg->cmsg_level = SOL_SOCKET;
1794         } else {
1795             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1796         }
1797         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1798         cmsg->cmsg_len = CMSG_LEN(len);
1799 
1800         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1801             int *fd = (int *)data;
1802             int *target_fd = (int *)target_data;
1803             int i, numfds = len / sizeof(int);
1804 
1805             for (i = 0; i < numfds; i++) {
1806                 __get_user(fd[i], target_fd + i);
1807             }
1808         } else if (cmsg->cmsg_level == SOL_SOCKET
1809                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1810             struct ucred *cred = (struct ucred *)data;
1811             struct target_ucred *target_cred =
1812                 (struct target_ucred *)target_data;
1813 
1814             __get_user(cred->pid, &target_cred->pid);
1815             __get_user(cred->uid, &target_cred->uid);
1816             __get_user(cred->gid, &target_cred->gid);
1817         } else {
1818             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1819                           cmsg->cmsg_level, cmsg->cmsg_type);
1820             memcpy(data, target_data, len);
1821         }
1822 
1823         cmsg = CMSG_NXTHDR(msgh, cmsg);
1824         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1825                                          target_cmsg_start);
1826     }
1827     unlock_user(target_cmsg, target_cmsg_addr, 0);
1828  the_end:
1829     msgh->msg_controllen = space;
1830     return 0;
1831 }
1832 
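/*
 * Convert host ancillary data back into the guest msghdr.  Known payload
 * types are converted field by field, unknown ones are copied verbatim
 * with a LOG_UNIMP warning, and MSG_CTRUNC is reported when the guest
 * control buffer is too small.
 */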
1833 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1834                                            struct msghdr *msgh)
1835 {
1836     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1837     abi_long msg_controllen;
1838     abi_ulong target_cmsg_addr;
1839     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1840     socklen_t space = 0;
1841 
1842     msg_controllen = tswapal(target_msgh->msg_controllen);
1843     if (msg_controllen < sizeof (struct target_cmsghdr))
1844         goto the_end;
1845     target_cmsg_addr = tswapal(target_msgh->msg_control);
1846     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1847     target_cmsg_start = target_cmsg;
1848     if (!target_cmsg)
1849         return -TARGET_EFAULT;
1850 
1851     while (cmsg && target_cmsg) {
1852         void *data = CMSG_DATA(cmsg);
1853         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1854 
1855         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1856         int tgt_len, tgt_space;
1857 
1858         /* We never copy a half-header but may copy half-data;
1859          * this is Linux's behaviour in put_cmsg(). Note that
1860          * truncation here is a guest problem (which we report
1861          * to the guest via the CTRUNC bit), unlike truncation
1862          * in target_to_host_cmsg, which is a QEMU bug.
1863          */
1864         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1865             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1866             break;
1867         }
1868 
1869         if (cmsg->cmsg_level == SOL_SOCKET) {
1870             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1871         } else {
1872             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1873         }
1874         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1875 
1876         /* Payload types which need a different size of payload on
1877          * the target must adjust tgt_len here.
1878          */
1879         tgt_len = len;
1880         switch (cmsg->cmsg_level) {
1881         case SOL_SOCKET:
1882             switch (cmsg->cmsg_type) {
1883             case SO_TIMESTAMP:
1884                 tgt_len = sizeof(struct target_timeval);
1885                 break;
1886             default:
1887                 break;
1888             }
1889             break;
1890         default:
1891             break;
1892         }
1893 
1894         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1895             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1896             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1897         }
1898 
1899         /* We must now copy-and-convert len bytes of payload
1900          * into tgt_len bytes of destination space. Bear in mind
1901          * that in both source and destination we may be dealing
1902          * with a truncated value!
1903          */
1904         switch (cmsg->cmsg_level) {
1905         case SOL_SOCKET:
1906             switch (cmsg->cmsg_type) {
1907             case SCM_RIGHTS:
1908             {
1909                 int *fd = (int *)data;
1910                 int *target_fd = (int *)target_data;
1911                 int i, numfds = tgt_len / sizeof(int);
1912 
1913                 for (i = 0; i < numfds; i++) {
1914                     __put_user(fd[i], target_fd + i);
1915                 }
1916                 break;
1917             }
1918             case SO_TIMESTAMP:
1919             {
1920                 struct timeval *tv = (struct timeval *)data;
1921                 struct target_timeval *target_tv =
1922                     (struct target_timeval *)target_data;
1923 
1924                 if (len != sizeof(struct timeval) ||
1925                     tgt_len != sizeof(struct target_timeval)) {
1926                     goto unimplemented;
1927                 }
1928 
1929                 /* copy struct timeval to target */
1930                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1931                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1932                 break;
1933             }
1934             case SCM_CREDENTIALS:
1935             {
1936                 struct ucred *cred = (struct ucred *)data;
1937                 struct target_ucred *target_cred =
1938                     (struct target_ucred *)target_data;
1939 
1940                 __put_user(cred->pid, &target_cred->pid);
1941                 __put_user(cred->uid, &target_cred->uid);
1942                 __put_user(cred->gid, &target_cred->gid);
1943                 break;
1944             }
1945             default:
1946                 goto unimplemented;
1947             }
1948             break;
1949 
1950         case SOL_IP:
1951             switch (cmsg->cmsg_type) {
1952             case IP_TTL:
1953             {
1954                 uint32_t *v = (uint32_t *)data;
1955                 uint32_t *t_int = (uint32_t *)target_data;
1956 
1957                 if (len != sizeof(uint32_t) ||
1958                     tgt_len != sizeof(uint32_t)) {
1959                     goto unimplemented;
1960                 }
1961                 __put_user(*v, t_int);
1962                 break;
1963             }
1964             case IP_RECVERR:
1965             {
1966                 struct errhdr_t {
1967                    struct sock_extended_err ee;
1968                    struct sockaddr_in offender;
1969                 };
1970                 struct errhdr_t *errh = (struct errhdr_t *)data;
1971                 struct errhdr_t *target_errh =
1972                     (struct errhdr_t *)target_data;
1973 
1974                 if (len != sizeof(struct errhdr_t) ||
1975                     tgt_len != sizeof(struct errhdr_t)) {
1976                     goto unimplemented;
1977                 }
1978                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1979                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1980                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1981                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1982                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1983                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1984                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1985                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1986                     (void *) &errh->offender, sizeof(errh->offender));
1987                 break;
1988             }
1989             default:
1990                 goto unimplemented;
1991             }
1992             break;
1993 
1994         case SOL_IPV6:
1995             switch (cmsg->cmsg_type) {
1996             case IPV6_HOPLIMIT:
1997             {
1998                 uint32_t *v = (uint32_t *)data;
1999                 uint32_t *t_int = (uint32_t *)target_data;
2000 
2001                 if (len != sizeof(uint32_t) ||
2002                     tgt_len != sizeof(uint32_t)) {
2003                     goto unimplemented;
2004                 }
2005                 __put_user(*v, t_int);
2006                 break;
2007             }
2008             case IPV6_RECVERR:
2009             {
2010                 struct errhdr6_t {
2011                    struct sock_extended_err ee;
2012                    struct sockaddr_in6 offender;
2013                 };
2014                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2015                 struct errhdr6_t *target_errh =
2016                     (struct errhdr6_t *)target_data;
2017 
2018                 if (len != sizeof(struct errhdr6_t) ||
2019                     tgt_len != sizeof(struct errhdr6_t)) {
2020                     goto unimplemented;
2021                 }
2022                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2023                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2024                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2025                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2026                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2027                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2028                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2029                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2030                     (void *) &errh->offender, sizeof(errh->offender));
2031                 break;
2032             }
2033             default:
2034                 goto unimplemented;
2035             }
2036             break;
2037 
2038         default:
2039         unimplemented:
2040             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2041                           cmsg->cmsg_level, cmsg->cmsg_type);
2042             memcpy(target_data, data, MIN(len, tgt_len));
2043             if (tgt_len > len) {
2044                 memset(target_data + len, 0, tgt_len - len);
2045             }
2046         }
2047 
2048         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2049         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2050         if (msg_controllen < tgt_space) {
2051             tgt_space = msg_controllen;
2052         }
2053         msg_controllen -= tgt_space;
2054         space += tgt_space;
2055         cmsg = CMSG_NXTHDR(msgh, cmsg);
2056         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2057                                          target_cmsg_start);
2058     }
2059     unlock_user(target_cmsg, target_cmsg_addr, space);
2060  the_end:
2061     target_msgh->msg_controllen = tswapal(space);
2062     return 0;
2063 }
2064 
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long do_setsockopt(int sockfd, int level, int optname,
2067                               abi_ulong optval_addr, socklen_t optlen)
2068 {
2069     abi_long ret;
2070     int val;
2071     struct ip_mreqn *ip_mreq;
2072     struct ip_mreq_source *ip_mreq_source;
2073 
2074     switch(level) {
2075     case SOL_TCP:
2076     case SOL_UDP:
2077         /* TCP and UDP options all take an 'int' value.  */
2078         if (optlen < sizeof(uint32_t))
2079             return -TARGET_EINVAL;
2080 
2081         if (get_user_u32(val, optval_addr))
2082             return -TARGET_EFAULT;
2083         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2084         break;
2085     case SOL_IP:
2086         switch(optname) {
2087         case IP_TOS:
2088         case IP_TTL:
2089         case IP_HDRINCL:
2090         case IP_ROUTER_ALERT:
2091         case IP_RECVOPTS:
2092         case IP_RETOPTS:
2093         case IP_PKTINFO:
2094         case IP_MTU_DISCOVER:
2095         case IP_RECVERR:
2096         case IP_RECVTTL:
2097         case IP_RECVTOS:
2098 #ifdef IP_FREEBIND
2099         case IP_FREEBIND:
2100 #endif
2101         case IP_MULTICAST_TTL:
2102         case IP_MULTICAST_LOOP:
2103             val = 0;
2104             if (optlen >= sizeof(uint32_t)) {
2105                 if (get_user_u32(val, optval_addr))
2106                     return -TARGET_EFAULT;
2107             } else if (optlen >= 1) {
2108                 if (get_user_u8(val, optval_addr))
2109                     return -TARGET_EFAULT;
2110             }
2111             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112             break;
2113         case IP_ADD_MEMBERSHIP:
2114         case IP_DROP_MEMBERSHIP:
2115             if (optlen < sizeof (struct target_ip_mreq) ||
2116                 optlen > sizeof (struct target_ip_mreqn))
2117                 return -TARGET_EINVAL;
2118 
2119             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2120             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2121             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2122             break;
2123 
2124         case IP_BLOCK_SOURCE:
2125         case IP_UNBLOCK_SOURCE:
2126         case IP_ADD_SOURCE_MEMBERSHIP:
2127         case IP_DROP_SOURCE_MEMBERSHIP:
2128             if (optlen != sizeof (struct target_ip_mreq_source))
2129                 return -TARGET_EINVAL;
2130 
2131             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132             if (!ip_mreq_source) {
2133                 return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136             unlock_user(ip_mreq_source, optval_addr, 0);
2137             break;
2138 
2139         default:
2140             goto unimplemented;
2141         }
2142         break;
2143     case SOL_IPV6:
2144         switch (optname) {
2145         case IPV6_MTU_DISCOVER:
2146         case IPV6_MTU:
2147         case IPV6_V6ONLY:
2148         case IPV6_RECVPKTINFO:
2149         case IPV6_UNICAST_HOPS:
2150         case IPV6_MULTICAST_HOPS:
2151         case IPV6_MULTICAST_LOOP:
2152         case IPV6_RECVERR:
2153         case IPV6_RECVHOPLIMIT:
2154         case IPV6_2292HOPLIMIT:
2155         case IPV6_CHECKSUM:
2156         case IPV6_ADDRFORM:
2157         case IPV6_2292PKTINFO:
2158         case IPV6_RECVTCLASS:
2159         case IPV6_RECVRTHDR:
2160         case IPV6_2292RTHDR:
2161         case IPV6_RECVHOPOPTS:
2162         case IPV6_2292HOPOPTS:
2163         case IPV6_RECVDSTOPTS:
2164         case IPV6_2292DSTOPTS:
2165         case IPV6_TCLASS:
2166         case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168         case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171         case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174         case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177         case IPV6_RECVORIGDSTADDR:
2178 #endif
2179             val = 0;
2180             if (optlen < sizeof(uint32_t)) {
2181                 return -TARGET_EINVAL;
2182             }
2183             if (get_user_u32(val, optval_addr)) {
2184                 return -TARGET_EFAULT;
2185             }
2186             ret = get_errno(setsockopt(sockfd, level, optname,
2187                                        &val, sizeof(val)));
2188             break;
2189         case IPV6_PKTINFO:
2190         {
2191             struct in6_pktinfo pki;
2192 
2193             if (optlen < sizeof(pki)) {
2194                 return -TARGET_EINVAL;
2195             }
2196 
2197             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198                 return -TARGET_EFAULT;
2199             }
2200 
2201             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202 
2203             ret = get_errno(setsockopt(sockfd, level, optname,
2204                                        &pki, sizeof(pki)));
2205             break;
2206         }
2207         case IPV6_ADD_MEMBERSHIP:
2208         case IPV6_DROP_MEMBERSHIP:
2209         {
2210             struct ipv6_mreq ipv6mreq;
2211 
2212             if (optlen < sizeof(ipv6mreq)) {
2213                 return -TARGET_EINVAL;
2214             }
2215 
2216             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217                 return -TARGET_EFAULT;
2218             }
2219 
2220             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221 
2222             ret = get_errno(setsockopt(sockfd, level, optname,
2223                                        &ipv6mreq, sizeof(ipv6mreq)));
2224             break;
2225         }
2226         default:
2227             goto unimplemented;
2228         }
2229         break;
2230     case SOL_ICMPV6:
2231         switch (optname) {
2232         case ICMPV6_FILTER:
2233         {
2234             struct icmp6_filter icmp6f;
2235 
2236             if (optlen > sizeof(icmp6f)) {
2237                 optlen = sizeof(icmp6f);
2238             }
2239 
2240             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241                 return -TARGET_EFAULT;
2242             }
2243 
2244             for (val = 0; val < 8; val++) {
2245                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246             }
2247 
2248             ret = get_errno(setsockopt(sockfd, level, optname,
2249                                        &icmp6f, optlen));
2250             break;
2251         }
2252         default:
2253             goto unimplemented;
2254         }
2255         break;
2256     case SOL_RAW:
2257         switch (optname) {
2258         case ICMP_FILTER:
2259         case IPV6_CHECKSUM:
2260             /* These take a u32 value.  */
2261             if (optlen < sizeof(uint32_t)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (get_user_u32(val, optval_addr)) {
2266                 return -TARGET_EFAULT;
2267             }
2268             ret = get_errno(setsockopt(sockfd, level, optname,
2269                                        &val, sizeof(val)));
2270             break;
2271 
2272         default:
2273             goto unimplemented;
2274         }
2275         break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277     case SOL_ALG:
2278         switch (optname) {
2279         case ALG_SET_KEY:
2280         {
2281             char *alg_key = g_malloc(optlen);
2282 
2283             if (!alg_key) {
2284                 return -TARGET_ENOMEM;
2285             }
2286             if (copy_from_user(alg_key, optval_addr, optlen)) {
2287                 g_free(alg_key);
2288                 return -TARGET_EFAULT;
2289             }
2290             ret = get_errno(setsockopt(sockfd, level, optname,
2291                                        alg_key, optlen));
2292             g_free(alg_key);
2293             break;
2294         }
2295         case ALG_SET_AEAD_AUTHSIZE:
2296         {
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        NULL, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305 #endif
2306     case TARGET_SOL_SOCKET:
2307         switch (optname) {
2308         case TARGET_SO_RCVTIMEO:
2309         {
2310                 struct timeval tv;
2311 
2312                 optname = SO_RCVTIMEO;
2313 
2314 set_timeout:
2315                 if (optlen != sizeof(struct target_timeval)) {
2316                     return -TARGET_EINVAL;
2317                 }
2318 
2319                 if (copy_from_user_timeval(&tv, optval_addr)) {
2320                     return -TARGET_EFAULT;
2321                 }
2322 
2323                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2324                                 &tv, sizeof(tv)));
2325                 return ret;
2326         }
2327         case TARGET_SO_SNDTIMEO:
2328                 optname = SO_SNDTIMEO;
2329                 goto set_timeout;
2330         case TARGET_SO_ATTACH_FILTER:
2331         {
2332                 struct target_sock_fprog *tfprog;
2333                 struct target_sock_filter *tfilter;
2334                 struct sock_fprog fprog;
2335                 struct sock_filter *filter;
2336                 int i;
2337 
2338                 if (optlen != sizeof(*tfprog)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2342                     return -TARGET_EFAULT;
2343                 }
2344                 if (!lock_user_struct(VERIFY_READ, tfilter,
2345                                       tswapal(tfprog->filter), 0)) {
2346                     unlock_user_struct(tfprog, optval_addr, 1);
2347                     return -TARGET_EFAULT;
2348                 }
2349 
2350                 fprog.len = tswap16(tfprog->len);
2351                 filter = g_try_new(struct sock_filter, fprog.len);
2352                 if (filter == NULL) {
2353                     unlock_user_struct(tfilter, tfprog->filter, 1);
2354                     unlock_user_struct(tfprog, optval_addr, 1);
2355                     return -TARGET_ENOMEM;
2356                 }
2357                 for (i = 0; i < fprog.len; i++) {
2358                     filter[i].code = tswap16(tfilter[i].code);
2359                     filter[i].jt = tfilter[i].jt;
2360                     filter[i].jf = tfilter[i].jf;
2361                     filter[i].k = tswap32(tfilter[i].k);
2362                 }
2363                 fprog.filter = filter;
2364 
2365                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2367                 g_free(filter);
2368 
2369                 unlock_user_struct(tfilter, tfprog->filter, 1);
2370                 unlock_user_struct(tfprog, optval_addr, 1);
2371                 return ret;
2372         }
2373         case TARGET_SO_BINDTODEVICE:
2374         {
2375                 char *dev_ifname, *addr_ifname;
2376 
2377                 if (optlen > IFNAMSIZ - 1) {
2378                     optlen = IFNAMSIZ - 1;
2379                 }
2380                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2381                 if (!dev_ifname) {
2382                     return -TARGET_EFAULT;
2383                 }
2384                 optname = SO_BINDTODEVICE;
2385                 addr_ifname = alloca(IFNAMSIZ);
2386                 memcpy(addr_ifname, dev_ifname, optlen);
2387                 addr_ifname[optlen] = 0;
2388                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2389                                            addr_ifname, optlen));
2390                 unlock_user(dev_ifname, optval_addr, 0);
2391                 return ret;
2392         }
2393         case TARGET_SO_LINGER:
2394         {
2395                 struct linger lg;
2396                 struct target_linger *tlg;
2397 
2398                 if (optlen != sizeof(struct target_linger)) {
2399                     return -TARGET_EINVAL;
2400                 }
2401                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2402                     return -TARGET_EFAULT;
2403                 }
2404                 __get_user(lg.l_onoff, &tlg->l_onoff);
2405                 __get_user(lg.l_linger, &tlg->l_linger);
2406                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2407                                 &lg, sizeof(lg)));
2408                 unlock_user_struct(tlg, optval_addr, 0);
2409                 return ret;
2410         }
2411         /* Options with 'int' argument.  */
2412         case TARGET_SO_DEBUG:
2413                 optname = SO_DEBUG;
2414                 break;
2415         case TARGET_SO_REUSEADDR:
2416                 optname = SO_REUSEADDR;
2417                 break;
2418 #ifdef SO_REUSEPORT
2419         case TARGET_SO_REUSEPORT:
2420                 optname = SO_REUSEPORT;
2421                 break;
2422 #endif
2423         case TARGET_SO_TYPE:
2424                 optname = SO_TYPE;
2425                 break;
2426         case TARGET_SO_ERROR:
2427                 optname = SO_ERROR;
2428                 break;
2429         case TARGET_SO_DONTROUTE:
2430                 optname = SO_DONTROUTE;
2431                 break;
2432         case TARGET_SO_BROADCAST:
2433                 optname = SO_BROADCAST;
2434                 break;
2435         case TARGET_SO_SNDBUF:
2436                 optname = SO_SNDBUF;
2437                 break;
2438         case TARGET_SO_SNDBUFFORCE:
2439                 optname = SO_SNDBUFFORCE;
2440                 break;
2441         case TARGET_SO_RCVBUF:
2442                 optname = SO_RCVBUF;
2443                 break;
2444         case TARGET_SO_RCVBUFFORCE:
2445                 optname = SO_RCVBUFFORCE;
2446                 break;
2447         case TARGET_SO_KEEPALIVE:
2448                 optname = SO_KEEPALIVE;
2449                 break;
2450         case TARGET_SO_OOBINLINE:
2451                 optname = SO_OOBINLINE;
2452                 break;
2453         case TARGET_SO_NO_CHECK:
2454                 optname = SO_NO_CHECK;
2455                 break;
2456         case TARGET_SO_PRIORITY:
2457                 optname = SO_PRIORITY;
2458                 break;
2459 #ifdef SO_BSDCOMPAT
2460         case TARGET_SO_BSDCOMPAT:
2461                 optname = SO_BSDCOMPAT;
2462                 break;
2463 #endif
2464         case TARGET_SO_PASSCRED:
2465                 optname = SO_PASSCRED;
2466                 break;
2467         case TARGET_SO_PASSSEC:
2468                 optname = SO_PASSSEC;
2469                 break;
2470         case TARGET_SO_TIMESTAMP:
2471                 optname = SO_TIMESTAMP;
2472                 break;
2473         case TARGET_SO_RCVLOWAT:
2474                 optname = SO_RCVLOWAT;
2475                 break;
2476         default:
2477             goto unimplemented;
2478         }
2479         if (optlen < sizeof(uint32_t))
2480             return -TARGET_EINVAL;
2481 
2482         if (get_user_u32(val, optval_addr))
2483             return -TARGET_EFAULT;
2484         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2485         break;
2486 #ifdef SOL_NETLINK
2487     case SOL_NETLINK:
2488         switch (optname) {
2489         case NETLINK_PKTINFO:
2490         case NETLINK_ADD_MEMBERSHIP:
2491         case NETLINK_DROP_MEMBERSHIP:
2492         case NETLINK_BROADCAST_ERROR:
2493         case NETLINK_NO_ENOBUFS:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495         case NETLINK_LISTEN_ALL_NSID:
2496         case NETLINK_CAP_ACK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499         case NETLINK_EXT_ACK:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502         case NETLINK_GET_STRICT_CHK:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2504             break;
2505         default:
2506             goto unimplemented;
2507         }
2508         val = 0;
2509         if (optlen < sizeof(uint32_t)) {
2510             return -TARGET_EINVAL;
2511         }
2512         if (get_user_u32(val, optval_addr)) {
2513             return -TARGET_EFAULT;
2514         }
2515         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2516                                    sizeof(val)));
2517         break;
2518 #endif /* SOL_NETLINK */
2519     default:
2520     unimplemented:
2521         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2522                       level, optname);
2523         ret = -TARGET_ENOPROTOOPT;
2524     }
2525     return ret;
2526 }
2527 
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long do_getsockopt(int sockfd, int level, int optname,
2530                               abi_ulong optval_addr, abi_ulong optlen)
2531 {
2532     abi_long ret;
2533     int len, val;
2534     socklen_t lv;
2535 
2536     switch(level) {
2537     case TARGET_SOL_SOCKET:
2538         level = SOL_SOCKET;
2539         switch (optname) {
2540         /* These don't just return a single integer */
2541         case TARGET_SO_PEERNAME:
2542             goto unimplemented;
2543         case TARGET_SO_RCVTIMEO: {
2544             struct timeval tv;
2545             socklen_t tvlen;
2546 
2547             optname = SO_RCVTIMEO;
2548 
2549 get_timeout:
2550             if (get_user_u32(len, optlen)) {
2551                 return -TARGET_EFAULT;
2552             }
2553             if (len < 0) {
2554                 return -TARGET_EINVAL;
2555             }
2556 
2557             tvlen = sizeof(tv);
2558             ret = get_errno(getsockopt(sockfd, level, optname,
2559                                        &tv, &tvlen));
2560             if (ret < 0) {
2561                 return ret;
2562             }
2563             if (len > sizeof(struct target_timeval)) {
2564                 len = sizeof(struct target_timeval);
2565             }
2566             if (copy_to_user_timeval(optval_addr, &tv)) {
2567                 return -TARGET_EFAULT;
2568             }
2569             if (put_user_u32(len, optlen)) {
2570                 return -TARGET_EFAULT;
2571             }
2572             break;
2573         }
2574         case TARGET_SO_SNDTIMEO:
2575             optname = SO_SNDTIMEO;
2576             goto get_timeout;
2577         case TARGET_SO_PEERCRED: {
2578             struct ucred cr;
2579             socklen_t crlen;
2580             struct target_ucred *tcr;
2581 
2582             if (get_user_u32(len, optlen)) {
2583                 return -TARGET_EFAULT;
2584             }
2585             if (len < 0) {
2586                 return -TARGET_EINVAL;
2587             }
2588 
2589             crlen = sizeof(cr);
2590             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2591                                        &cr, &crlen));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (len > crlen) {
2596                 len = crlen;
2597             }
2598             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2599                 return -TARGET_EFAULT;
2600             }
2601             __put_user(cr.pid, &tcr->pid);
2602             __put_user(cr.uid, &tcr->uid);
2603             __put_user(cr.gid, &tcr->gid);
2604             unlock_user_struct(tcr, optval_addr, 1);
2605             if (put_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             break;
2609         }
2610         case TARGET_SO_PEERSEC: {
2611             char *name;
2612 
2613             if (get_user_u32(len, optlen)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             if (len < 0) {
2617                 return -TARGET_EINVAL;
2618             }
2619             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2620             if (!name) {
2621                 return -TARGET_EFAULT;
2622             }
2623             lv = len;
2624             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2625                                        name, &lv));
2626             if (put_user_u32(lv, optlen)) {
2627                 ret = -TARGET_EFAULT;
2628             }
2629             unlock_user(name, optval_addr, lv);
2630             break;
2631         }
2632         case TARGET_SO_LINGER:
2633         {
2634             struct linger lg;
2635             socklen_t lglen;
2636             struct target_linger *tlg;
2637 
2638             if (get_user_u32(len, optlen)) {
2639                 return -TARGET_EFAULT;
2640             }
2641             if (len < 0) {
2642                 return -TARGET_EINVAL;
2643             }
2644 
2645             lglen = sizeof(lg);
2646             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2647                                        &lg, &lglen));
2648             if (ret < 0) {
2649                 return ret;
2650             }
2651             if (len > lglen) {
2652                 len = lglen;
2653             }
2654             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             __put_user(lg.l_onoff, &tlg->l_onoff);
2658             __put_user(lg.l_linger, &tlg->l_linger);
2659             unlock_user_struct(tlg, optval_addr, 1);
2660             if (put_user_u32(len, optlen)) {
2661                 return -TARGET_EFAULT;
2662             }
2663             break;
2664         }
2665         /* Options with 'int' argument.  */
2666         case TARGET_SO_DEBUG:
2667             optname = SO_DEBUG;
2668             goto int_case;
2669         case TARGET_SO_REUSEADDR:
2670             optname = SO_REUSEADDR;
2671             goto int_case;
2672 #ifdef SO_REUSEPORT
2673         case TARGET_SO_REUSEPORT:
2674             optname = SO_REUSEPORT;
2675             goto int_case;
2676 #endif
2677         case TARGET_SO_TYPE:
2678             optname = SO_TYPE;
2679             goto int_case;
2680         case TARGET_SO_ERROR:
2681             optname = SO_ERROR;
2682             goto int_case;
2683         case TARGET_SO_DONTROUTE:
2684             optname = SO_DONTROUTE;
2685             goto int_case;
2686         case TARGET_SO_BROADCAST:
2687             optname = SO_BROADCAST;
2688             goto int_case;
2689         case TARGET_SO_SNDBUF:
2690             optname = SO_SNDBUF;
2691             goto int_case;
2692         case TARGET_SO_RCVBUF:
2693             optname = SO_RCVBUF;
2694             goto int_case;
2695         case TARGET_SO_KEEPALIVE:
2696             optname = SO_KEEPALIVE;
2697             goto int_case;
2698         case TARGET_SO_OOBINLINE:
2699             optname = SO_OOBINLINE;
2700             goto int_case;
2701         case TARGET_SO_NO_CHECK:
2702             optname = SO_NO_CHECK;
2703             goto int_case;
2704         case TARGET_SO_PRIORITY:
2705             optname = SO_PRIORITY;
2706             goto int_case;
2707 #ifdef SO_BSDCOMPAT
2708         case TARGET_SO_BSDCOMPAT:
2709             optname = SO_BSDCOMPAT;
2710             goto int_case;
2711 #endif
2712         case TARGET_SO_PASSCRED:
2713             optname = SO_PASSCRED;
2714             goto int_case;
2715         case TARGET_SO_TIMESTAMP:
2716             optname = SO_TIMESTAMP;
2717             goto int_case;
2718         case TARGET_SO_RCVLOWAT:
2719             optname = SO_RCVLOWAT;
2720             goto int_case;
2721         case TARGET_SO_ACCEPTCONN:
2722             optname = SO_ACCEPTCONN;
2723             goto int_case;
2724         case TARGET_SO_PROTOCOL:
2725             optname = SO_PROTOCOL;
2726             goto int_case;
2727         case TARGET_SO_DOMAIN:
2728             optname = SO_DOMAIN;
2729             goto int_case;
2730         default:
2731             goto int_case;
2732         }
2733         break;
2734     case SOL_TCP:
2735     case SOL_UDP:
2736         /* TCP and UDP options all take an 'int' value.  */
2737     int_case:
2738         if (get_user_u32(len, optlen))
2739             return -TARGET_EFAULT;
2740         if (len < 0)
2741             return -TARGET_EINVAL;
2742         lv = sizeof(lv);
2743         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2744         if (ret < 0)
2745             return ret;
2746         if (optname == SO_TYPE) {
2747             val = host_to_target_sock_type(val);
2748         }
2749         if (len > lv)
2750             len = lv;
2751         if (len == 4) {
2752             if (put_user_u32(val, optval_addr))
2753                 return -TARGET_EFAULT;
2754         } else {
2755             if (put_user_u8(val, optval_addr))
2756                 return -TARGET_EFAULT;
2757         }
2758         if (put_user_u32(len, optlen))
2759             return -TARGET_EFAULT;
2760         break;
2761     case SOL_IP:
2762         switch(optname) {
2763         case IP_TOS:
2764         case IP_TTL:
2765         case IP_HDRINCL:
2766         case IP_ROUTER_ALERT:
2767         case IP_RECVOPTS:
2768         case IP_RETOPTS:
2769         case IP_PKTINFO:
2770         case IP_MTU_DISCOVER:
2771         case IP_RECVERR:
2772         case IP_RECVTOS:
2773 #ifdef IP_FREEBIND
2774         case IP_FREEBIND:
2775 #endif
2776         case IP_MULTICAST_TTL:
2777         case IP_MULTICAST_LOOP:
2778             if (get_user_u32(len, optlen))
2779                 return -TARGET_EFAULT;
2780             if (len < 0)
2781                 return -TARGET_EINVAL;
2782             lv = sizeof(lv);
2783             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2784             if (ret < 0)
2785                 return ret;
2786             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2787                 len = 1;
2788                 if (put_user_u32(len, optlen)
2789                     || put_user_u8(val, optval_addr))
2790                     return -TARGET_EFAULT;
2791             } else {
2792                 if (len > sizeof(int))
2793                     len = sizeof(int);
2794                 if (put_user_u32(len, optlen)
2795                     || put_user_u32(val, optval_addr))
2796                     return -TARGET_EFAULT;
2797             }
2798             break;
2799         default:
2800             ret = -TARGET_ENOPROTOOPT;
2801             break;
2802         }
2803         break;
2804     case SOL_IPV6:
2805         switch (optname) {
2806         case IPV6_MTU_DISCOVER:
2807         case IPV6_MTU:
2808         case IPV6_V6ONLY:
2809         case IPV6_RECVPKTINFO:
2810         case IPV6_UNICAST_HOPS:
2811         case IPV6_MULTICAST_HOPS:
2812         case IPV6_MULTICAST_LOOP:
2813         case IPV6_RECVERR:
2814         case IPV6_RECVHOPLIMIT:
2815         case IPV6_2292HOPLIMIT:
2816         case IPV6_CHECKSUM:
2817         case IPV6_ADDRFORM:
2818         case IPV6_2292PKTINFO:
2819         case IPV6_RECVTCLASS:
2820         case IPV6_RECVRTHDR:
2821         case IPV6_2292RTHDR:
2822         case IPV6_RECVHOPOPTS:
2823         case IPV6_2292HOPOPTS:
2824         case IPV6_RECVDSTOPTS:
2825         case IPV6_2292DSTOPTS:
2826         case IPV6_TCLASS:
2827         case IPV6_ADDR_PREFERENCES:
2828 #ifdef IPV6_RECVPATHMTU
2829         case IPV6_RECVPATHMTU:
2830 #endif
2831 #ifdef IPV6_TRANSPARENT
2832         case IPV6_TRANSPARENT:
2833 #endif
2834 #ifdef IPV6_FREEBIND
2835         case IPV6_FREEBIND:
2836 #endif
2837 #ifdef IPV6_RECVORIGDSTADDR
2838         case IPV6_RECVORIGDSTADDR:
2839 #endif
2840             if (get_user_u32(len, optlen))
2841                 return -TARGET_EFAULT;
2842             if (len < 0)
2843                 return -TARGET_EINVAL;
2844             lv = sizeof(lv);
2845             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2846             if (ret < 0)
2847                 return ret;
2848             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2849                 len = 1;
2850                 if (put_user_u32(len, optlen)
2851                     || put_user_u8(val, optval_addr))
2852                     return -TARGET_EFAULT;
2853             } else {
2854                 if (len > sizeof(int))
2855                     len = sizeof(int);
2856                 if (put_user_u32(len, optlen)
2857                     || put_user_u32(val, optval_addr))
2858                     return -TARGET_EFAULT;
2859             }
2860             break;
2861         default:
2862             ret = -TARGET_ENOPROTOOPT;
2863             break;
2864         }
2865         break;
2866 #ifdef SOL_NETLINK
2867     case SOL_NETLINK:
2868         switch (optname) {
2869         case NETLINK_PKTINFO:
2870         case NETLINK_BROADCAST_ERROR:
2871         case NETLINK_NO_ENOBUFS:
2872 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2873         case NETLINK_LISTEN_ALL_NSID:
2874         case NETLINK_CAP_ACK:
2875 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2876 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2877         case NETLINK_EXT_ACK:
2878 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2879 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2880         case NETLINK_GET_STRICT_CHK:
2881 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2882             if (get_user_u32(len, optlen)) {
2883                 return -TARGET_EFAULT;
2884             }
2885             if (len != sizeof(val)) {
2886                 return -TARGET_EINVAL;
2887             }
2888             lv = len;
2889             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2890             if (ret < 0) {
2891                 return ret;
2892             }
2893             if (put_user_u32(lv, optlen)
2894                 || put_user_u32(val, optval_addr)) {
2895                 return -TARGET_EFAULT;
2896             }
2897             break;
2898 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2899         case NETLINK_LIST_MEMBERSHIPS:
2900         {
2901             uint32_t *results;
2902             int i;
2903             if (get_user_u32(len, optlen)) {
2904                 return -TARGET_EFAULT;
2905             }
2906             if (len < 0) {
2907                 return -TARGET_EINVAL;
2908             }
2909             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2910             if (!results && len > 0) {
2911                 return -TARGET_EFAULT;
2912             }
2913             lv = len;
2914             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2915             if (ret < 0) {
2916                 unlock_user(results, optval_addr, 0);
2917                 return ret;
2918             }
2919             /* swap host endianess to target endianess. */
2920             /* Swap host endianness to target endianness. */
2921                 results[i] = tswap32(results[i]);
2922             }
2923             if (put_user_u32(lv, optlen)) {
2924                 return -TARGET_EFAULT;
2925             }
2926             unlock_user(results, optval_addr, 0);
2927             break;
2928         }
2929 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2930         default:
2931             goto unimplemented;
2932         }
2933         break;
2934 #endif /* SOL_NETLINK */
2935     default:
2936     unimplemented:
2937         qemu_log_mask(LOG_UNIMP,
2938                       "getsockopt level=%d optname=%d not yet supported\n",
2939                       level, optname);
2940         ret = -TARGET_EOPNOTSUPP;
2941         break;
2942     }
2943     return ret;
2944 }
2945 
2946 /* Convert target low/high pair representing file offset into the host
2947  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2948  * as the kernel doesn't handle them either.
2949  */
2950 static void target_to_host_low_high(abi_ulong tlow,
2951                                     abi_ulong thigh,
2952                                     unsigned long *hlow,
2953                                     unsigned long *hhigh)
2954 {
2955     uint64_t off = tlow |
2956         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2957         TARGET_LONG_BITS / 2;
2958 
2959     *hlow = off;
2960     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2961 }
2962 
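/*
 * Lock a guest iovec array (at most IOV_MAX entries) and translate it
 * into a host struct iovec array.  Returns NULL and sets errno on
 * failure; a successful result must be released with unlock_iovec().
 */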
2963 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2964                                 abi_ulong count, int copy)
2965 {
2966     struct target_iovec *target_vec;
2967     struct iovec *vec;
2968     abi_ulong total_len, max_len;
2969     int i;
2970     int err = 0;
2971     bool bad_address = false;
2972 
2973     if (count == 0) {
2974         errno = 0;
2975         return NULL;
2976     }
2977     if (count > IOV_MAX) {
2978         errno = EINVAL;
2979         return NULL;
2980     }
2981 
2982     vec = g_try_new0(struct iovec, count);
2983     if (vec == NULL) {
2984         errno = ENOMEM;
2985         return NULL;
2986     }
2987 
2988     target_vec = lock_user(VERIFY_READ, target_addr,
2989                            count * sizeof(struct target_iovec), 1);
2990     if (target_vec == NULL) {
2991         err = EFAULT;
2992         goto fail2;
2993     }
2994 
2995     /* ??? If host page size > target page size, this will result in a
2996        value larger than what we can actually support.  */
2997     max_len = 0x7fffffff & TARGET_PAGE_MASK;
2998     total_len = 0;
2999 
3000     for (i = 0; i < count; i++) {
3001         abi_ulong base = tswapal(target_vec[i].iov_base);
3002         abi_long len = tswapal(target_vec[i].iov_len);
3003 
3004         if (len < 0) {
3005             err = EINVAL;
3006             goto fail;
3007         } else if (len == 0) {
3008             /* A zero-length entry is ignored.  */
3009             vec[i].iov_base = 0;
3010         } else {
3011             vec[i].iov_base = lock_user(type, base, len, copy);
3012             /* If the first buffer pointer is bad, this is a fault.  But
3013              * subsequent bad buffers will result in a partial write; this
3014              * is realized by filling the vector with null pointers and
3015              * zero lengths. */
3016             if (!vec[i].iov_base) {
3017                 if (i == 0) {
3018                     err = EFAULT;
3019                     goto fail;
3020                 } else {
3021                     bad_address = true;
3022                 }
3023             }
3024             if (bad_address) {
3025                 len = 0;
3026             }
3027             if (len > max_len - total_len) {
3028                 len = max_len - total_len;
3029             }
3030         }
3031         vec[i].iov_len = len;
3032         total_len += len;
3033     }
3034 
3035     unlock_user(target_vec, target_addr, 0);
3036     return vec;
3037 
3038  fail:
3039     while (--i >= 0) {
3040         if (tswapal(target_vec[i].iov_len) > 0) {
3041             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3042         }
3043     }
3044     unlock_user(target_vec, target_addr, 0);
3045  fail2:
3046     g_free(vec);
3047     errno = err;
3048     return NULL;
3049 }
3050 
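/*
 * Release an iovec previously set up by lock_iovec(), copying the data
 * back to guest memory when 'copy' is set.
 */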
3051 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3052                          abi_ulong count, int copy)
3053 {
3054     struct target_iovec *target_vec;
3055     int i;
3056 
3057     target_vec = lock_user(VERIFY_READ, target_addr,
3058                            count * sizeof(struct target_iovec), 1);
3059     if (target_vec) {
3060         for (i = 0; i < count; i++) {
3061             abi_ulong base = tswapal(target_vec[i].iov_base);
3062             abi_long len = tswapal(target_vec[i].iov_len);
3063             if (len < 0) {
3064                 break;
3065             }
3066             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3067         }
3068         unlock_user(target_vec, target_addr, 0);
3069     }
3070 
3071     g_free(vec);
3072 }
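/*
 * Illustrative sketch (guest-side, not part of this file) of the
 * partial-write behaviour described in lock_iovec() above.  Assuming fd is
 * an open, writable descriptor and the second buffer address is unmapped in
 * the guest, writev() is emulated as writing only the first buffer:
 *
 *     char good[5] = "hello";
 *     struct iovec iov[2] = {
 *         { .iov_base = good,               .iov_len = 5 },
 *         { .iov_base = (void *)0xdeadbeef, .iov_len = 5 },  // bad address
 *     };
 *     ssize_t n = writev(fd, iov, 2);   // n == 5 under this emulation
 *
 * Only a bad pointer in the *first* element makes the whole call fail with
 * EFAULT, mirroring the comment inside lock_iovec().
 */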
3073 
3074 static inline int target_to_host_sock_type(int *type)
3075 {
3076     int host_type = 0;
3077     int target_type = *type;
3078 
3079     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3080     case TARGET_SOCK_DGRAM:
3081         host_type = SOCK_DGRAM;
3082         break;
3083     case TARGET_SOCK_STREAM:
3084         host_type = SOCK_STREAM;
3085         break;
3086     default:
3087         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3088         break;
3089     }
3090     if (target_type & TARGET_SOCK_CLOEXEC) {
3091 #if defined(SOCK_CLOEXEC)
3092         host_type |= SOCK_CLOEXEC;
3093 #else
3094         return -TARGET_EINVAL;
3095 #endif
3096     }
3097     if (target_type & TARGET_SOCK_NONBLOCK) {
3098 #if defined(SOCK_NONBLOCK)
3099         host_type |= SOCK_NONBLOCK;
3100 #elif !defined(O_NONBLOCK)
3101         return -TARGET_EINVAL;
3102 #endif
3103     }
3104     *type = host_type;
3105     return 0;
3106 }
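/*
 * A minimal sketch of the mapping performed above (kept symbolic, since the
 * numeric TARGET_* values are per-architecture):
 *
 *     int type = TARGET_SOCK_STREAM | TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK;
 *     ret = target_to_host_sock_type(&type);
 *     // On hosts defining both flags, type is now
 *     // SOCK_STREAM | SOCK_CLOEXEC | SOCK_NONBLOCK and ret == 0.
 *     // If the host lacks SOCK_CLOEXEC the call returns -TARGET_EINVAL;
 *     // a missing SOCK_NONBLOCK is patched up afterwards by
 *     // sock_flags_fixup() below using fcntl(O_NONBLOCK).
 */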
3107 
3108 /* Try to emulate socket type flags after socket creation.  */
3109 static int sock_flags_fixup(int fd, int target_type)
3110 {
3111 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3112     if (target_type & TARGET_SOCK_NONBLOCK) {
3113         int flags = fcntl(fd, F_GETFL);
3114         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3115             close(fd);
3116             return -TARGET_EINVAL;
3117         }
3118     }
3119 #endif
3120     return fd;
3121 }
3122 
3123 /* do_socket() Must return target values and target errnos. */
3124 static abi_long do_socket(int domain, int type, int protocol)
3125 {
3126     int target_type = type;
3127     int ret;
3128 
3129     ret = target_to_host_sock_type(&type);
3130     if (ret) {
3131         return ret;
3132     }
3133 
3134     if (domain == PF_NETLINK && !(
3135 #ifdef CONFIG_RTNETLINK
3136          protocol == NETLINK_ROUTE ||
3137 #endif
3138          protocol == NETLINK_KOBJECT_UEVENT ||
3139          protocol == NETLINK_AUDIT)) {
3140         return -TARGET_EPROTONOSUPPORT;
3141     }
3142 
3143     if (domain == AF_PACKET ||
3144         (domain == AF_INET && type == SOCK_PACKET)) {
3145         protocol = tswap16(protocol);
3146     }
3147 
3148     ret = get_errno(socket(domain, type, protocol));
3149     if (ret >= 0) {
3150         ret = sock_flags_fixup(ret, target_type);
3151         if (type == SOCK_PACKET) {
3152             /* Handle an obsolete case: if the socket type is
3153              * SOCK_PACKET, bind by name.
3154              */
3155             fd_trans_register(ret, &target_packet_trans);
3156         } else if (domain == PF_NETLINK) {
3157             switch (protocol) {
3158 #ifdef CONFIG_RTNETLINK
3159             case NETLINK_ROUTE:
3160                 fd_trans_register(ret, &target_netlink_route_trans);
3161                 break;
3162 #endif
3163             case NETLINK_KOBJECT_UEVENT:
3164                 /* nothing to do: messages are strings */
3165                 break;
3166             case NETLINK_AUDIT:
3167                 fd_trans_register(ret, &target_netlink_audit_trans);
3168                 break;
3169             default:
3170                 g_assert_not_reached();
3171             }
3172         }
3173     }
3174     return ret;
3175 }
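/*
 * Example of the protocol gating above (a sketch, using standard Linux
 * constants, and assuming QEMU was built with rtnetlink support): a guest
 * socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE) is accepted and gets a data
 * translator registered for its fd, while an unsupported protocol such as
 * NETLINK_GENERIC is refused up front:
 *
 *     socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);    // emulated, translated
 *     socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);  // -TARGET_EPROTONOSUPPORT
 */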
3176 
3177 /* do_bind() Must return target values and target errnos. */
3178 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3179                         socklen_t addrlen)
3180 {
3181     void *addr;
3182     abi_long ret;
3183 
3184     if ((int)addrlen < 0) {
3185         return -TARGET_EINVAL;
3186     }
3187 
3188     addr = alloca(addrlen+1);
3189 
3190     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3191     if (ret)
3192         return ret;
3193 
3194     return get_errno(bind(sockfd, addr, addrlen));
3195 }
3196 
3197 /* do_connect() Must return target values and target errnos. */
3198 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3199                            socklen_t addrlen)
3200 {
3201     void *addr;
3202     abi_long ret;
3203 
3204     if ((int)addrlen < 0) {
3205         return -TARGET_EINVAL;
3206     }
3207 
3208     addr = alloca(addrlen+1);
3209 
3210     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3211     if (ret)
3212         return ret;
3213 
3214     return get_errno(safe_connect(sockfd, addr, addrlen));
3215 }
3216 
3217 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3218 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3219                                       int flags, int send)
3220 {
3221     abi_long ret, len;
3222     struct msghdr msg;
3223     abi_ulong count;
3224     struct iovec *vec;
3225     abi_ulong target_vec;
3226 
3227     if (msgp->msg_name) {
3228         msg.msg_namelen = tswap32(msgp->msg_namelen);
3229         msg.msg_name = alloca(msg.msg_namelen+1);
3230         ret = target_to_host_sockaddr(fd, msg.msg_name,
3231                                       tswapal(msgp->msg_name),
3232                                       msg.msg_namelen);
3233         if (ret == -TARGET_EFAULT) {
3234             /* For connected sockets msg_name and msg_namelen must
3235              * be ignored, so returning EFAULT immediately is wrong.
3236              * Instead, pass a bad msg_name to the host kernel, and
3237              * let it decide whether to return EFAULT or not.
3238              */
3239             msg.msg_name = (void *)-1;
3240         } else if (ret) {
3241             goto out2;
3242         }
3243     } else {
3244         msg.msg_name = NULL;
3245         msg.msg_namelen = 0;
3246     }
3247     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3248     msg.msg_control = alloca(msg.msg_controllen);
3249     memset(msg.msg_control, 0, msg.msg_controllen);
3250 
3251     msg.msg_flags = tswap32(msgp->msg_flags);
3252 
3253     count = tswapal(msgp->msg_iovlen);
3254     target_vec = tswapal(msgp->msg_iov);
3255 
3256     if (count > IOV_MAX) {
3257         /* sendmsg/recvmsg return a different errno for this condition than
3258          * readv/writev, so we must catch it here before lock_iovec() does.
3259          */
3260         ret = -TARGET_EMSGSIZE;
3261         goto out2;
3262     }
3263 
3264     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3265                      target_vec, count, send);
3266     if (vec == NULL) {
3267         ret = -host_to_target_errno(errno);
3268         goto out2;
3269     }
3270     msg.msg_iovlen = count;
3271     msg.msg_iov = vec;
3272 
3273     if (send) {
3274         if (fd_trans_target_to_host_data(fd)) {
3275             void *host_msg;
3276 
3277             host_msg = g_malloc(msg.msg_iov->iov_len);
3278             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3279             ret = fd_trans_target_to_host_data(fd)(host_msg,
3280                                                    msg.msg_iov->iov_len);
3281             if (ret >= 0) {
3282                 msg.msg_iov->iov_base = host_msg;
3283                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3284             }
3285             g_free(host_msg);
3286         } else {
3287             ret = target_to_host_cmsg(&msg, msgp);
3288             if (ret == 0) {
3289                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3290             }
3291         }
3292     } else {
3293         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3294         if (!is_error(ret)) {
3295             len = ret;
3296             if (fd_trans_host_to_target_data(fd)) {
3297                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3298                                                MIN(msg.msg_iov->iov_len, len));
3299             } else {
3300                 ret = host_to_target_cmsg(msgp, &msg);
3301             }
3302             if (!is_error(ret)) {
3303                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3304                 msgp->msg_flags = tswap32(msg.msg_flags);
3305                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3306                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3307                                     msg.msg_name, msg.msg_namelen);
3308                     if (ret) {
3309                         goto out;
3310                     }
3311                 }
3312 
3313                 ret = len;
3314             }
3315         }
3316     }
3317 
3318 out:
3319     unlock_iovec(vec, target_vec, count, !send);
3320 out2:
3321     return ret;
3322 }
3323 
3324 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3325                                int flags, int send)
3326 {
3327     abi_long ret;
3328     struct target_msghdr *msgp;
3329 
3330     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3331                           msgp,
3332                           target_msg,
3333                           send ? 1 : 0)) {
3334         return -TARGET_EFAULT;
3335     }
3336     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3337     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3338     return ret;
3339 }
3340 
3341 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3342  * so it might not have this *mmsg-specific flag either.
3343  */
3344 #ifndef MSG_WAITFORONE
3345 #define MSG_WAITFORONE 0x10000
3346 #endif
3347 
3348 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3349                                 unsigned int vlen, unsigned int flags,
3350                                 int send)
3351 {
3352     struct target_mmsghdr *mmsgp;
3353     abi_long ret = 0;
3354     int i;
3355 
3356     if (vlen > UIO_MAXIOV) {
3357         vlen = UIO_MAXIOV;
3358     }
3359 
3360     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3361     if (!mmsgp) {
3362         return -TARGET_EFAULT;
3363     }
3364 
3365     for (i = 0; i < vlen; i++) {
3366         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3367         if (is_error(ret)) {
3368             break;
3369         }
3370         mmsgp[i].msg_len = tswap32(ret);
3371         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3372         if (flags & MSG_WAITFORONE) {
3373             flags |= MSG_DONTWAIT;
3374         }
3375     }
3376 
3377     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3378 
3379     /* Return number of datagrams sent if we sent any at all;
3380      * otherwise return the error.
3381      */
3382     if (i) {
3383         return i;
3384     }
3385     return ret;
3386 }
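/*
 * Sketch of the semantics implemented above, seen from the guest
 * (hypothetical values): with vlen == 3, if two datagrams are received and
 * the third recvmsg fails, the call returns 2 and the error is dropped,
 * matching the kernel's "return the count if anything succeeded" rule.
 * MSG_WAITFORONE makes only the first iteration block:
 *
 *     struct mmsghdr msgs[3];
 *     // ... fill in msgs[i].msg_hdr ...
 *     int n = recvmmsg(fd, msgs, 3, MSG_WAITFORONE, NULL);
 *     // blocks for the first datagram, then drains whatever is already
 *     // queued, because MSG_DONTWAIT is OR'ed into flags after one packet
 */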
3387 
3388 /* do_accept4() Must return target values and target errnos. */
3389 static abi_long do_accept4(int fd, abi_ulong target_addr,
3390                            abi_ulong target_addrlen_addr, int flags)
3391 {
3392     socklen_t addrlen, ret_addrlen;
3393     void *addr;
3394     abi_long ret;
3395     int host_flags;
3396 
3397     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3398 
3399     if (target_addr == 0) {
3400         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3401     }
3402 
3403     /* linux returns EFAULT if addrlen pointer is invalid */
3404     if (get_user_u32(addrlen, target_addrlen_addr))
3405         return -TARGET_EFAULT;
3406 
3407     if ((int)addrlen < 0) {
3408         return -TARGET_EINVAL;
3409     }
3410 
3411     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3412         return -TARGET_EFAULT;
3413     }
3414 
3415     addr = alloca(addrlen);
3416 
3417     ret_addrlen = addrlen;
3418     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3419     if (!is_error(ret)) {
3420         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3421         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3422             ret = -TARGET_EFAULT;
3423         }
3424     }
3425     return ret;
3426 }
3427 
3428 /* do_getpeername() Must return target values and target errnos. */
3429 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3430                                abi_ulong target_addrlen_addr)
3431 {
3432     socklen_t addrlen, ret_addrlen;
3433     void *addr;
3434     abi_long ret;
3435 
3436     if (get_user_u32(addrlen, target_addrlen_addr))
3437         return -TARGET_EFAULT;
3438 
3439     if ((int)addrlen < 0) {
3440         return -TARGET_EINVAL;
3441     }
3442 
3443     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3444         return -TARGET_EFAULT;
3445     }
3446 
3447     addr = alloca(addrlen);
3448 
3449     ret_addrlen = addrlen;
3450     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3451     if (!is_error(ret)) {
3452         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3453         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3454             ret = -TARGET_EFAULT;
3455         }
3456     }
3457     return ret;
3458 }
3459 
3460 /* do_getsockname() Must return target values and target errnos. */
3461 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3462                                abi_ulong target_addrlen_addr)
3463 {
3464     socklen_t addrlen, ret_addrlen;
3465     void *addr;
3466     abi_long ret;
3467 
3468     if (get_user_u32(addrlen, target_addrlen_addr))
3469         return -TARGET_EFAULT;
3470 
3471     if ((int)addrlen < 0) {
3472         return -TARGET_EINVAL;
3473     }
3474 
3475     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3476         return -TARGET_EFAULT;
3477     }
3478 
3479     addr = alloca(addrlen);
3480 
3481     ret_addrlen = addrlen;
3482     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3483     if (!is_error(ret)) {
3484         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3485         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3486             ret = -TARGET_EFAULT;
3487         }
3488     }
3489     return ret;
3490 }
3491 
3492 /* do_socketpair() Must return target values and target errnos. */
3493 static abi_long do_socketpair(int domain, int type, int protocol,
3494                               abi_ulong target_tab_addr)
3495 {
3496     int tab[2];
3497     abi_long ret;
3498 
3499     target_to_host_sock_type(&type);
3500 
3501     ret = get_errno(socketpair(domain, type, protocol, tab));
3502     if (!is_error(ret)) {
3503         if (put_user_s32(tab[0], target_tab_addr)
3504             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3505             ret = -TARGET_EFAULT;
3506     }
3507     return ret;
3508 }
3509 
3510 /* do_sendto() Must return target values and target errnos. */
3511 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3512                           abi_ulong target_addr, socklen_t addrlen)
3513 {
3514     void *addr;
3515     void *host_msg;
3516     void *copy_msg = NULL;
3517     abi_long ret;
3518 
3519     if ((int)addrlen < 0) {
3520         return -TARGET_EINVAL;
3521     }
3522 
3523     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3524     if (!host_msg)
3525         return -TARGET_EFAULT;
3526     if (fd_trans_target_to_host_data(fd)) {
3527         copy_msg = host_msg;
3528         host_msg = g_malloc(len);
3529         memcpy(host_msg, copy_msg, len);
3530         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3531         if (ret < 0) {
3532             goto fail;
3533         }
3534     }
3535     if (target_addr) {
3536         addr = alloca(addrlen+1);
3537         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3538         if (ret) {
3539             goto fail;
3540         }
3541         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3542     } else {
3543         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3544     }
3545 fail:
3546     if (copy_msg) {
3547         g_free(host_msg);
3548         host_msg = copy_msg;
3549     }
3550     unlock_user(host_msg, msg, 0);
3551     return ret;
3552 }
3553 
3554 /* do_recvfrom() Must return target values and target errnos. */
3555 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3556                             abi_ulong target_addr,
3557                             abi_ulong target_addrlen)
3558 {
3559     socklen_t addrlen, ret_addrlen;
3560     void *addr;
3561     void *host_msg;
3562     abi_long ret;
3563 
3564     if (!msg) {
3565         host_msg = NULL;
3566     } else {
3567         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3568         if (!host_msg) {
3569             return -TARGET_EFAULT;
3570         }
3571     }
3572     if (target_addr) {
3573         if (get_user_u32(addrlen, target_addrlen)) {
3574             ret = -TARGET_EFAULT;
3575             goto fail;
3576         }
3577         if ((int)addrlen < 0) {
3578             ret = -TARGET_EINVAL;
3579             goto fail;
3580         }
3581         addr = alloca(addrlen);
3582         ret_addrlen = addrlen;
3583         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3584                                       addr, &ret_addrlen));
3585     } else {
3586         addr = NULL; /* To keep compiler quiet.  */
3587         addrlen = 0; /* To keep compiler quiet.  */
3588         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3589     }
3590     if (!is_error(ret)) {
3591         if (fd_trans_host_to_target_data(fd)) {
3592             abi_long trans;
3593             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3594             if (is_error(trans)) {
3595                 ret = trans;
3596                 goto fail;
3597             }
3598         }
3599         if (target_addr) {
3600             host_to_target_sockaddr(target_addr, addr,
3601                                     MIN(addrlen, ret_addrlen));
3602             if (put_user_u32(ret_addrlen, target_addrlen)) {
3603                 ret = -TARGET_EFAULT;
3604                 goto fail;
3605             }
3606         }
3607         unlock_user(host_msg, msg, len);
3608     } else {
3609 fail:
3610         unlock_user(host_msg, msg, 0);
3611     }
3612     return ret;
3613 }
3614 
3615 #ifdef TARGET_NR_socketcall
3616 /* do_socketcall() must return target values and target errnos. */
3617 static abi_long do_socketcall(int num, abi_ulong vptr)
3618 {
3619     static const unsigned nargs[] = { /* number of arguments per operation */
3620         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3621         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3622         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3623         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3624         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3625         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3626         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3627         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3628         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3629         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3630         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3631         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3632         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3633         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3634         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3635         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3636         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3637         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3638         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3639         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3640     };
3641     abi_long a[6]; /* max 6 args */
3642     unsigned i;
3643 
3644     /* check the range of the first argument num */
3645     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3646     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3647         return -TARGET_EINVAL;
3648     }
3649     /* ensure we have space for args */
3650     if (nargs[num] > ARRAY_SIZE(a)) {
3651         return -TARGET_EINVAL;
3652     }
3653     /* collect the arguments in a[] according to nargs[] */
3654     for (i = 0; i < nargs[num]; ++i) {
3655         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3656             return -TARGET_EFAULT;
3657         }
3658     }
3659     /* now when we have the args, invoke the appropriate underlying function */
3660     switch (num) {
3661     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3662         return do_socket(a[0], a[1], a[2]);
3663     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3664         return do_bind(a[0], a[1], a[2]);
3665     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3666         return do_connect(a[0], a[1], a[2]);
3667     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3668         return get_errno(listen(a[0], a[1]));
3669     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3670         return do_accept4(a[0], a[1], a[2], 0);
3671     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3672         return do_getsockname(a[0], a[1], a[2]);
3673     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3674         return do_getpeername(a[0], a[1], a[2]);
3675     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3676         return do_socketpair(a[0], a[1], a[2], a[3]);
3677     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3678         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3679     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3680         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3681     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3682         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3683     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3684         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3685     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3686         return get_errno(shutdown(a[0], a[1]));
3687     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3688         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3689     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3690         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3691     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3692         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3693     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3694         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3695     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3696         return do_accept4(a[0], a[1], a[2], a[3]);
3697     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3698         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3699     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3700         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3701     default:
3702         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3703         return -TARGET_EINVAL;
3704     }
3705 }
3706 #endif
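/*
 * Worked example of the dispatch above (a sketch): on a guest ABI that only
 * provides socketcall(), connect(sockfd, addr, addrlen) arrives as
 *
 *     socketcall(TARGET_SYS_CONNECT, args)
 *
 * where args points at three abi_longs in guest memory.  do_socketcall()
 * reads nargs[TARGET_SYS_CONNECT] == 3 words with get_user_ual() and then
 * calls do_connect(a[0], a[1], a[2]).
 */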
3707 
3708 #define N_SHM_REGIONS	32
3709 
3710 static struct shm_region {
3711     abi_ulong start;
3712     abi_ulong size;
3713     bool in_use;
3714 } shm_regions[N_SHM_REGIONS];
3715 
3716 #ifndef TARGET_SEMID64_DS
3717 /* asm-generic version of this struct */
3718 struct target_semid64_ds
3719 {
3720   struct target_ipc_perm sem_perm;
3721   abi_ulong sem_otime;
3722 #if TARGET_ABI_BITS == 32
3723   abi_ulong __unused1;
3724 #endif
3725   abi_ulong sem_ctime;
3726 #if TARGET_ABI_BITS == 32
3727   abi_ulong __unused2;
3728 #endif
3729   abi_ulong sem_nsems;
3730   abi_ulong __unused3;
3731   abi_ulong __unused4;
3732 };
3733 #endif
3734 
3735 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3736                                                abi_ulong target_addr)
3737 {
3738     struct target_ipc_perm *target_ip;
3739     struct target_semid64_ds *target_sd;
3740 
3741     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3742         return -TARGET_EFAULT;
3743     target_ip = &(target_sd->sem_perm);
3744     host_ip->__key = tswap32(target_ip->__key);
3745     host_ip->uid = tswap32(target_ip->uid);
3746     host_ip->gid = tswap32(target_ip->gid);
3747     host_ip->cuid = tswap32(target_ip->cuid);
3748     host_ip->cgid = tswap32(target_ip->cgid);
3749 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3750     host_ip->mode = tswap32(target_ip->mode);
3751 #else
3752     host_ip->mode = tswap16(target_ip->mode);
3753 #endif
3754 #if defined(TARGET_PPC)
3755     host_ip->__seq = tswap32(target_ip->__seq);
3756 #else
3757     host_ip->__seq = tswap16(target_ip->__seq);
3758 #endif
3759     unlock_user_struct(target_sd, target_addr, 0);
3760     return 0;
3761 }
3762 
3763 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3764                                                struct ipc_perm *host_ip)
3765 {
3766     struct target_ipc_perm *target_ip;
3767     struct target_semid64_ds *target_sd;
3768 
3769     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3770         return -TARGET_EFAULT;
3771     target_ip = &(target_sd->sem_perm);
3772     target_ip->__key = tswap32(host_ip->__key);
3773     target_ip->uid = tswap32(host_ip->uid);
3774     target_ip->gid = tswap32(host_ip->gid);
3775     target_ip->cuid = tswap32(host_ip->cuid);
3776     target_ip->cgid = tswap32(host_ip->cgid);
3777 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3778     target_ip->mode = tswap32(host_ip->mode);
3779 #else
3780     target_ip->mode = tswap16(host_ip->mode);
3781 #endif
3782 #if defined(TARGET_PPC)
3783     target_ip->__seq = tswap32(host_ip->__seq);
3784 #else
3785     target_ip->__seq = tswap16(host_ip->__seq);
3786 #endif
3787     unlock_user_struct(target_sd, target_addr, 1);
3788     return 0;
3789 }
3790 
3791 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3792                                                abi_ulong target_addr)
3793 {
3794     struct target_semid64_ds *target_sd;
3795 
3796     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3797         return -TARGET_EFAULT;
3798     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3799         return -TARGET_EFAULT;
3800     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3801     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3802     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3803     unlock_user_struct(target_sd, target_addr, 0);
3804     return 0;
3805 }
3806 
3807 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3808                                                struct semid_ds *host_sd)
3809 {
3810     struct target_semid64_ds *target_sd;
3811 
3812     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3813         return -TARGET_EFAULT;
3814     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3815         return -TARGET_EFAULT;
3816     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3817     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3818     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3819     unlock_user_struct(target_sd, target_addr, 1);
3820     return 0;
3821 }
3822 
3823 struct target_seminfo {
3824     int semmap;
3825     int semmni;
3826     int semmns;
3827     int semmnu;
3828     int semmsl;
3829     int semopm;
3830     int semume;
3831     int semusz;
3832     int semvmx;
3833     int semaem;
3834 };
3835 
3836 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3837                                               struct seminfo *host_seminfo)
3838 {
3839     struct target_seminfo *target_seminfo;
3840     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3841         return -TARGET_EFAULT;
3842     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3843     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3844     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3845     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3846     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3847     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3848     __put_user(host_seminfo->semume, &target_seminfo->semume);
3849     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3850     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3851     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3852     unlock_user_struct(target_seminfo, target_addr, 1);
3853     return 0;
3854 }
3855 
3856 union semun {
3857     int val;
3858     struct semid_ds *buf;
3859     unsigned short *array;
3860     struct seminfo *__buf;
3861 };
3862 
3863 union target_semun {
3864     int val;
3865     abi_ulong buf;
3866     abi_ulong array;
3867     abi_ulong __buf;
3868 };
3869 
3870 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3871                                                abi_ulong target_addr)
3872 {
3873     int nsems;
3874     unsigned short *array;
3875     union semun semun;
3876     struct semid_ds semid_ds;
3877     int i, ret;
3878 
3879     semun.buf = &semid_ds;
3880 
3881     ret = semctl(semid, 0, IPC_STAT, semun);
3882     if (ret == -1)
3883         return get_errno(ret);
3884 
3885     nsems = semid_ds.sem_nsems;
3886 
3887     *host_array = g_try_new(unsigned short, nsems);
3888     if (!*host_array) {
3889         return -TARGET_ENOMEM;
3890     }
3891     array = lock_user(VERIFY_READ, target_addr,
3892                       nsems*sizeof(unsigned short), 1);
3893     if (!array) {
3894         g_free(*host_array);
3895         return -TARGET_EFAULT;
3896     }
3897 
3898     for (i = 0; i < nsems; i++) {
3899         __get_user((*host_array)[i], &array[i]);
3900     }
3901     unlock_user(array, target_addr, 0);
3902 
3903     return 0;
3904 }
3905 
3906 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3907                                                unsigned short **host_array)
3908 {
3909     int nsems;
3910     unsigned short *array;
3911     union semun semun;
3912     struct semid_ds semid_ds;
3913     int i, ret;
3914 
3915     semun.buf = &semid_ds;
3916 
3917     ret = semctl(semid, 0, IPC_STAT, semun);
3918     if (ret == -1)
3919         return get_errno(ret);
3920 
3921     nsems = semid_ds.sem_nsems;
3922 
3923     array = lock_user(VERIFY_WRITE, target_addr,
3924                       nsems*sizeof(unsigned short), 0);
3925     if (!array)
3926         return -TARGET_EFAULT;
3927 
3928     for (i = 0; i < nsems; i++) {
3929         __put_user((*host_array)[i], &array[i]);
3930     }
3931     g_free(*host_array);
3932     unlock_user(array, target_addr, 1);
3933 
3934     return 0;
3935 }
3936 
3937 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3938                                  abi_ulong target_arg)
3939 {
3940     union target_semun target_su = { .buf = target_arg };
3941     union semun arg;
3942     struct semid_ds dsarg;
3943     unsigned short *array = NULL;
3944     struct seminfo seminfo;
3945     abi_long ret = -TARGET_EINVAL;
3946     abi_long err;
3947     cmd &= 0xff;
3948 
3949     switch (cmd) {
3950     case GETVAL:
3951     case SETVAL:
3952         /* In 64 bit cross-endian situations, we will erroneously pick up
3953          * the wrong half of the union for the "val" element.  To rectify
3954          * this, the entire 8-byte structure is byteswapped, followed by
3955          * a swap of the 4 byte val field. In other cases, the data is
3956          * already in proper host byte order. */
3957         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3958             target_su.buf = tswapal(target_su.buf);
3959             arg.val = tswap32(target_su.val);
3960         } else {
3961             arg.val = target_su.val;
3962         }
3963         ret = get_errno(semctl(semid, semnum, cmd, arg));
3964         break;
3965     case GETALL:
3966     case SETALL:
3967         err = target_to_host_semarray(semid, &array, target_su.array);
3968         if (err)
3969             return err;
3970         arg.array = array;
3971         ret = get_errno(semctl(semid, semnum, cmd, arg));
3972         err = host_to_target_semarray(semid, target_su.array, &array);
3973         if (err)
3974             return err;
3975         break;
3976     case IPC_STAT:
3977     case IPC_SET:
3978     case SEM_STAT:
3979         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3980         if (err)
3981             return err;
3982         arg.buf = &dsarg;
3983         ret = get_errno(semctl(semid, semnum, cmd, arg));
3984         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3985         if (err)
3986             return err;
3987         break;
3988     case IPC_INFO:
3989     case SEM_INFO:
3990         arg.__buf = &seminfo;
3991         ret = get_errno(semctl(semid, semnum, cmd, arg));
3992         err = host_to_target_seminfo(target_su.__buf, &seminfo);
3993         if (err)
3994             return err;
3995         break;
3996     case IPC_RMID:
3997     case GETPID:
3998     case GETNCNT:
3999     case GETZCNT:
4000         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4001         break;
4002     }
4003 
4004     return ret;
4005 }
4006 
4007 struct target_sembuf {
4008     unsigned short sem_num;
4009     short sem_op;
4010     short sem_flg;
4011 };
4012 
4013 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4014                                              abi_ulong target_addr,
4015                                              unsigned nsops)
4016 {
4017     struct target_sembuf *target_sembuf;
4018     int i;
4019 
4020     target_sembuf = lock_user(VERIFY_READ, target_addr,
4021                               nsops*sizeof(struct target_sembuf), 1);
4022     if (!target_sembuf)
4023         return -TARGET_EFAULT;
4024 
4025     for (i = 0; i < nsops; i++) {
4026         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4027         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4028         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4029     }
4030 
4031     unlock_user(target_sembuf, target_addr, 0);
4032 
4033     return 0;
4034 }
4035 
4036 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4037     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4038 
4039 /*
4040  * This macro is required to handle the s390 variant, which passes the
4041  * arguments in a different order than the default.
4042  */
4043 #ifdef __s390x__
4044 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4045   (__nsops), (__timeout), (__sops)
4046 #else
4047 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4048   (__nsops), 0, (__sops), (__timeout)
4049 #endif
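/*
 * Expansion sketch for the macro above, as used in do_semtimedop() below:
 *
 *     safe_ipc(IPCOP_semtimedop, semid,
 *              SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts))
 *
 * becomes, on s390x:
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, (long)pts, sops);
 * and on the default variant:
 *     safe_ipc(IPCOP_semtimedop, semid, nsops, 0, sops, (long)pts);
 */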
4050 
4051 static inline abi_long do_semtimedop(int semid,
4052                                      abi_long ptr,
4053                                      unsigned nsops,
4054                                      abi_long timeout, bool time64)
4055 {
4056     struct sembuf *sops;
4057     struct timespec ts, *pts = NULL;
4058     abi_long ret;
4059 
4060     if (timeout) {
4061         pts = &ts;
4062         if (time64) {
4063             if (target_to_host_timespec64(pts, timeout)) {
4064                 return -TARGET_EFAULT;
4065             }
4066         } else {
4067             if (target_to_host_timespec(pts, timeout)) {
4068                 return -TARGET_EFAULT;
4069             }
4070         }
4071     }
4072 
4073     if (nsops > TARGET_SEMOPM) {
4074         return -TARGET_E2BIG;
4075     }
4076 
4077     sops = g_new(struct sembuf, nsops);
4078 
4079     if (target_to_host_sembuf(sops, ptr, nsops)) {
4080         g_free(sops);
4081         return -TARGET_EFAULT;
4082     }
4083 
4084     ret = -TARGET_ENOSYS;
4085 #ifdef __NR_semtimedop
4086     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4087 #endif
4088 #ifdef __NR_ipc
4089     if (ret == -TARGET_ENOSYS) {
4090         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4091                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4092     }
4093 #endif
4094     g_free(sops);
4095     return ret;
4096 }
4097 #endif
4098 
4099 struct target_msqid_ds
4100 {
4101     struct target_ipc_perm msg_perm;
4102     abi_ulong msg_stime;
4103 #if TARGET_ABI_BITS == 32
4104     abi_ulong __unused1;
4105 #endif
4106     abi_ulong msg_rtime;
4107 #if TARGET_ABI_BITS == 32
4108     abi_ulong __unused2;
4109 #endif
4110     abi_ulong msg_ctime;
4111 #if TARGET_ABI_BITS == 32
4112     abi_ulong __unused3;
4113 #endif
4114     abi_ulong __msg_cbytes;
4115     abi_ulong msg_qnum;
4116     abi_ulong msg_qbytes;
4117     abi_ulong msg_lspid;
4118     abi_ulong msg_lrpid;
4119     abi_ulong __unused4;
4120     abi_ulong __unused5;
4121 };
4122 
4123 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4124                                                abi_ulong target_addr)
4125 {
4126     struct target_msqid_ds *target_md;
4127 
4128     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4129         return -TARGET_EFAULT;
4130     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4131         return -TARGET_EFAULT;
4132     host_md->msg_stime = tswapal(target_md->msg_stime);
4133     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4134     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4135     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4136     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4137     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4138     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4139     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4140     unlock_user_struct(target_md, target_addr, 0);
4141     return 0;
4142 }
4143 
4144 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4145                                                struct msqid_ds *host_md)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4150         return -TARGET_EFAULT;
4151     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4152         return -TARGET_EFAULT;
4153     target_md->msg_stime = tswapal(host_md->msg_stime);
4154     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4155     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4156     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4157     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4158     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4159     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4160     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 1);
4162     return 0;
4163 }
4164 
4165 struct target_msginfo {
4166     int msgpool;
4167     int msgmap;
4168     int msgmax;
4169     int msgmnb;
4170     int msgmni;
4171     int msgssz;
4172     int msgtql;
4173     unsigned short int msgseg;
4174 };
4175 
4176 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4177                                               struct msginfo *host_msginfo)
4178 {
4179     struct target_msginfo *target_msginfo;
4180     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4181         return -TARGET_EFAULT;
4182     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4183     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4184     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4185     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4186     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4187     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4188     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4189     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4190     unlock_user_struct(target_msginfo, target_addr, 1);
4191     return 0;
4192 }
4193 
4194 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4195 {
4196     struct msqid_ds dsarg;
4197     struct msginfo msginfo;
4198     abi_long ret = -TARGET_EINVAL;
4199 
4200     cmd &= 0xff;
4201 
4202     switch (cmd) {
4203     case IPC_STAT:
4204     case IPC_SET:
4205     case MSG_STAT:
4206         if (target_to_host_msqid_ds(&dsarg,ptr))
4207             return -TARGET_EFAULT;
4208         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4209         if (host_to_target_msqid_ds(ptr,&dsarg))
4210             return -TARGET_EFAULT;
4211         break;
4212     case IPC_RMID:
4213         ret = get_errno(msgctl(msgid, cmd, NULL));
4214         break;
4215     case IPC_INFO:
4216     case MSG_INFO:
4217         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4218         if (host_to_target_msginfo(ptr, &msginfo))
4219             return -TARGET_EFAULT;
4220         break;
4221     }
4222 
4223     return ret;
4224 }
4225 
4226 struct target_msgbuf {
4227     abi_long mtype;
4228     char mtext[1];
4229 };
4230 
4231 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4232                                  ssize_t msgsz, int msgflg)
4233 {
4234     struct target_msgbuf *target_mb;
4235     struct msgbuf *host_mb;
4236     abi_long ret = 0;
4237 
4238     if (msgsz < 0) {
4239         return -TARGET_EINVAL;
4240     }
4241 
4242     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4243         return -TARGET_EFAULT;
4244     host_mb = g_try_malloc(msgsz + sizeof(long));
4245     if (!host_mb) {
4246         unlock_user_struct(target_mb, msgp, 0);
4247         return -TARGET_ENOMEM;
4248     }
4249     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4250     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4251     ret = -TARGET_ENOSYS;
4252 #ifdef __NR_msgsnd
4253     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4254 #endif
4255 #ifdef __NR_ipc
4256     if (ret == -TARGET_ENOSYS) {
4257 #ifdef __s390x__
4258         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4259                                  host_mb));
4260 #else
4261         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4262                                  host_mb, 0));
4263 #endif
4264     }
4265 #endif
4266     g_free(host_mb);
4267     unlock_user_struct(target_mb, msgp, 0);
4268 
4269     return ret;
4270 }
4271 
4272 #ifdef __NR_ipc
4273 #if defined(__sparc__)
4274 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4275 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4276 #elif defined(__s390x__)
4277 /* The s390 sys_ipc variant has only five parameters.  */
4278 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4279     ((long int[]){(long int)__msgp, __msgtyp})
4280 #else
4281 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4282     ((long int[]){(long int)__msgp, __msgtyp}), 0
4283 #endif
4284 #endif
4285 
4286 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4287                                  ssize_t msgsz, abi_long msgtyp,
4288                                  int msgflg)
4289 {
4290     struct target_msgbuf *target_mb;
4291     char *target_mtext;
4292     struct msgbuf *host_mb;
4293     abi_long ret = 0;
4294 
4295     if (msgsz < 0) {
4296         return -TARGET_EINVAL;
4297     }
4298 
4299     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4300         return -TARGET_EFAULT;
4301 
4302     host_mb = g_try_malloc(msgsz + sizeof(long));
4303     if (!host_mb) {
4304         ret = -TARGET_ENOMEM;
4305         goto end;
4306     }
4307     ret = -TARGET_ENOSYS;
4308 #ifdef __NR_msgrcv
4309     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4310 #endif
4311 #ifdef __NR_ipc
4312     if (ret == -TARGET_ENOSYS) {
4313         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4314                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4315     }
4316 #endif
4317 
4318     if (ret > 0) {
4319         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4320         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4321         if (!target_mtext) {
4322             ret = -TARGET_EFAULT;
4323             goto end;
4324         }
4325         memcpy(target_mb->mtext, host_mb->mtext, ret);
4326         unlock_user(target_mtext, target_mtext_addr, ret);
4327     }
4328 
4329     target_mb->mtype = tswapal(host_mb->mtype);
4330 
4331 end:
4332     if (target_mb)
4333         unlock_user_struct(target_mb, msgp, 1);
4334     g_free(host_mb);
4335     return ret;
4336 }
4337 
4338 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4339                                                abi_ulong target_addr)
4340 {
4341     struct target_shmid_ds *target_sd;
4342 
4343     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4344         return -TARGET_EFAULT;
4345     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4346         return -TARGET_EFAULT;
4347     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4348     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4349     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4350     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4351     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4352     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4353     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4354     unlock_user_struct(target_sd, target_addr, 0);
4355     return 0;
4356 }
4357 
4358 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4359                                                struct shmid_ds *host_sd)
4360 {
4361     struct target_shmid_ds *target_sd;
4362 
4363     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4364         return -TARGET_EFAULT;
4365     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4366         return -TARGET_EFAULT;
4367     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4368     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4369     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4370     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4371     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4372     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4373     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4374     unlock_user_struct(target_sd, target_addr, 1);
4375     return 0;
4376 }
4377 
4378 struct  target_shminfo {
4379     abi_ulong shmmax;
4380     abi_ulong shmmin;
4381     abi_ulong shmmni;
4382     abi_ulong shmseg;
4383     abi_ulong shmall;
4384 };
4385 
4386 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4387                                               struct shminfo *host_shminfo)
4388 {
4389     struct target_shminfo *target_shminfo;
4390     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4391         return -TARGET_EFAULT;
4392     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4393     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4394     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4395     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4396     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4397     unlock_user_struct(target_shminfo, target_addr, 1);
4398     return 0;
4399 }
4400 
4401 struct target_shm_info {
4402     int used_ids;
4403     abi_ulong shm_tot;
4404     abi_ulong shm_rss;
4405     abi_ulong shm_swp;
4406     abi_ulong swap_attempts;
4407     abi_ulong swap_successes;
4408 };
4409 
4410 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4411                                                struct shm_info *host_shm_info)
4412 {
4413     struct target_shm_info *target_shm_info;
4414     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4415         return -TARGET_EFAULT;
4416     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4417     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4418     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4419     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4420     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4421     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4422     unlock_user_struct(target_shm_info, target_addr, 1);
4423     return 0;
4424 }
4425 
4426 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4427 {
4428     struct shmid_ds dsarg;
4429     struct shminfo shminfo;
4430     struct shm_info shm_info;
4431     abi_long ret = -TARGET_EINVAL;
4432 
4433     cmd &= 0xff;
4434 
4435     switch (cmd) {
4436     case IPC_STAT:
4437     case IPC_SET:
4438     case SHM_STAT:
4439         if (target_to_host_shmid_ds(&dsarg, buf))
4440             return -TARGET_EFAULT;
4441         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4442         if (host_to_target_shmid_ds(buf, &dsarg))
4443             return -TARGET_EFAULT;
4444         break;
4445     case IPC_INFO:
4446         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4447         if (host_to_target_shminfo(buf, &shminfo))
4448             return -TARGET_EFAULT;
4449         break;
4450     case SHM_INFO:
4451         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4452         if (host_to_target_shm_info(buf, &shm_info))
4453             return -TARGET_EFAULT;
4454         break;
4455     case IPC_RMID:
4456     case SHM_LOCK:
4457     case SHM_UNLOCK:
4458         ret = get_errno(shmctl(shmid, cmd, NULL));
4459         break;
4460     }
4461 
4462     return ret;
4463 }
4464 
4465 #ifndef TARGET_FORCE_SHMLBA
4466 /* For most architectures, SHMLBA is the same as the page size;
4467  * some architectures have larger values, in which case they should
4468  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4469  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4470  * and defining its own value for SHMLBA.
4471  *
4472  * The kernel also permits SHMLBA to be set by the architecture to a
4473  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4474  * this means that addresses are rounded to the large size if
4475  * SHM_RND is set but addresses not aligned to that size are not rejected
4476  * as long as they are at least page-aligned. Since the only architecture
4477  * which uses this is ia64, this code doesn't provide for that oddity.
4478  */
4479 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4480 {
4481     return TARGET_PAGE_SIZE;
4482 }
4483 #endif
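/*
 * Worked example of the SHMLBA handling in do_shmat() below (hypothetical
 * addresses, assuming shmlba == 0x1000): a guest shmat() at 0x40001234 with
 * SHM_RND is rounded down to 0x40001000, while the same call without SHM_RND
 * is rejected with -TARGET_EINVAL because the address is not SHMLBA-aligned.
 */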
4484 
4485 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4486                                  int shmid, abi_ulong shmaddr, int shmflg)
4487 {
4488     CPUState *cpu = env_cpu(cpu_env);
4489     abi_long raddr;
4490     void *host_raddr;
4491     struct shmid_ds shm_info;
4492     int i, ret;
4493     abi_ulong shmlba;
4494 
4495     /* shmat pointers are always untagged */
4496 
4497     /* find out the length of the shared memory segment */
4498     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4499     if (is_error(ret)) {
4500         /* can't get length, bail out */
4501         return ret;
4502     }
4503 
4504     shmlba = target_shmlba(cpu_env);
4505 
4506     if (shmaddr & (shmlba - 1)) {
4507         if (shmflg & SHM_RND) {
4508             shmaddr &= ~(shmlba - 1);
4509         } else {
4510             return -TARGET_EINVAL;
4511         }
4512     }
4513     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4514         return -TARGET_EINVAL;
4515     }
4516 
4517     mmap_lock();
4518 
4519     /*
4520      * We're mapping shared memory, so ensure we generate code for parallel
4521      * execution and flush old translations.  This will work up to the level
4522      * supported by the host -- anything that requires EXCP_ATOMIC will not
4523      * be atomic with respect to an external process.
4524      */
4525     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4526         cpu->tcg_cflags |= CF_PARALLEL;
4527         tb_flush(cpu);
4528     }
4529 
4530     if (shmaddr)
4531         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4532     else {
4533         abi_ulong mmap_start;
4534 
4535         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4536         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4537 
4538         if (mmap_start == -1) {
4539             errno = ENOMEM;
4540             host_raddr = (void *)-1;
4541         } else
4542             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4543                                shmflg | SHM_REMAP);
4544     }
4545 
4546     if (host_raddr == (void *)-1) {
4547         mmap_unlock();
4548         return get_errno((long)host_raddr);
4549     }
4550     raddr = h2g((unsigned long)host_raddr);
4551 
4552     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4553                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4554                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4555 
4556     for (i = 0; i < N_SHM_REGIONS; i++) {
4557         if (!shm_regions[i].in_use) {
4558             shm_regions[i].in_use = true;
4559             shm_regions[i].start = raddr;
4560             shm_regions[i].size = shm_info.shm_segsz;
4561             break;
4562         }
4563     }
4564 
4565     mmap_unlock();
4566     return raddr;
4567 
4568 }
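/*
 * Usage sketch (guest-side, hypothetical): shmat() with a NULL address lets
 * the code above pick a guest VA via mmap_find_vma(), aligned to the larger
 * of the host SHMLBA and the target's shmlba, and records it in
 * shm_regions[] so that do_shmdt() can later clear the page flags for
 * exactly that mapping:
 *
 *     int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *     void *p = shmat(id, NULL, 0);    // address chosen by the emulator
 *     shmdt(p);
 */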
4569 
4570 static inline abi_long do_shmdt(abi_ulong shmaddr)
4571 {
4572     int i;
4573     abi_long rv;
4574 
4575     /* shmdt pointers are always untagged */
4576 
4577     mmap_lock();
4578 
4579     for (i = 0; i < N_SHM_REGIONS; ++i) {
4580         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4581             shm_regions[i].in_use = false;
4582             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4583             break;
4584         }
4585     }
4586     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4587 
4588     mmap_unlock();
4589 
4590     return rv;
4591 }
4592 
4593 #ifdef TARGET_NR_ipc
4594 /* ??? This only works with linear mappings.  */
4595 /* do_ipc() must return target values and target errnos. */
4596 static abi_long do_ipc(CPUArchState *cpu_env,
4597                        unsigned int call, abi_long first,
4598                        abi_long second, abi_long third,
4599                        abi_long ptr, abi_long fifth)
4600 {
4601     int version;
4602     abi_long ret = 0;
4603 
4604     version = call >> 16;
4605     call &= 0xffff;
4606 
4607     switch (call) {
4608     case IPCOP_semop:
4609         ret = do_semtimedop(first, ptr, second, 0, false);
4610         break;
4611     case IPCOP_semtimedop:
4612     /*
4613      * The s390 sys_ipc variant has only five parameters instead of six
4614      * (as in the default variant); the only difference is the handling of
4615      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4616      * to a struct timespec while the generic variant uses the fifth parameter.
4617      */
4618 #if defined(TARGET_S390X)
4619         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4620 #else
4621         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4622 #endif
4623         break;
4624 
4625     case IPCOP_semget:
4626         ret = get_errno(semget(first, second, third));
4627         break;
4628 
4629     case IPCOP_semctl: {
4630         /* The semun argument to semctl is passed by value, so dereference the
4631          * ptr argument. */
4632         abi_ulong atptr;
4633         get_user_ual(atptr, ptr);
4634         ret = do_semctl(first, second, third, atptr);
4635         break;
4636     }
4637 
4638     case IPCOP_msgget:
4639         ret = get_errno(msgget(first, second));
4640         break;
4641 
4642     case IPCOP_msgsnd:
4643         ret = do_msgsnd(first, ptr, second, third);
4644         break;
4645 
4646     case IPCOP_msgctl:
4647         ret = do_msgctl(first, second, ptr);
4648         break;
4649 
4650     case IPCOP_msgrcv:
4651         switch (version) {
4652         case 0:
4653             {
4654                 struct target_ipc_kludge {
4655                     abi_long msgp;
4656                     abi_long msgtyp;
4657                 } *tmp;
4658 
4659                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4660                     ret = -TARGET_EFAULT;
4661                     break;
4662                 }
4663 
4664                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4665 
4666                 unlock_user_struct(tmp, ptr, 0);
4667                 break;
4668             }
4669         default:
4670             ret = do_msgrcv(first, ptr, second, fifth, third);
4671         }
4672         break;
4673 
4674     case IPCOP_shmat:
4675         switch (version) {
4676         default:
4677         {
4678             abi_ulong raddr;
4679             raddr = do_shmat(cpu_env, first, ptr, second);
4680             if (is_error(raddr))
4681                 return get_errno(raddr);
4682             if (put_user_ual(raddr, third))
4683                 return -TARGET_EFAULT;
4684             break;
4685         }
4686         case 1:
4687             ret = -TARGET_EINVAL;
4688             break;
4689         }
4690         break;
4691     case IPCOP_shmdt:
4692         ret = do_shmdt(ptr);
4693         break;
4694 
4695     case IPCOP_shmget:
4696         /* IPC_* flag values are the same on all Linux platforms */
4697         ret = get_errno(shmget(first, second, third));
4698         break;
4699 
4700     /* IPC_* and SHM_* command values are the same on all Linux platforms */
4701     case IPCOP_shmctl:
4702         ret = do_shmctl(first, second, ptr);
4703         break;
4704     default:
4705         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4706                       call, version);
4707         ret = -TARGET_ENOSYS;
4708         break;
4709     }
4710     return ret;
4711 }
4712 #endif
4713 
4714 /* kernel structure types definitions */
4715 
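/*
 * syscall_types.h is included twice: first to build the STRUCT_* enum
 * below, then to emit the struct_*_def argtype arrays consumed by the
 * thunk conversion code.
 */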
4716 #define STRUCT(name, ...) STRUCT_ ## name,
4717 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4718 enum {
4719 #include "syscall_types.h"
4720 STRUCT_MAX
4721 };
4722 #undef STRUCT
4723 #undef STRUCT_SPECIAL
4724 
4725 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4726 #define STRUCT_SPECIAL(name)
4727 #include "syscall_types.h"
4728 #undef STRUCT
4729 #undef STRUCT_SPECIAL
4730 
4731 #define MAX_STRUCT_SIZE 4096
4732 
4733 #ifdef CONFIG_FIEMAP
4734 /* So fiemap access checks don't overflow on 32 bit systems.
4735  * This is very slightly smaller than the limit imposed by
4736  * the underlying kernel.
4737  */
4738 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4739                             / sizeof(struct fiemap_extent))
4740 
4741 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4742                                        int fd, int cmd, abi_long arg)
4743 {
4744     /* The parameter for this ioctl is a struct fiemap followed
4745      * by an array of struct fiemap_extent whose size is set
4746      * in fiemap->fm_extent_count. The array is filled in by the
4747      * ioctl.
4748      */
4749     int target_size_in, target_size_out;
4750     struct fiemap *fm;
4751     const argtype *arg_type = ie->arg_type;
4752     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4753     void *argptr, *p;
4754     abi_long ret;
4755     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4756     uint32_t outbufsz;
4757     int free_fm = 0;
4758 
4759     assert(arg_type[0] == TYPE_PTR);
4760     assert(ie->access == IOC_RW);
4761     arg_type++;
4762     target_size_in = thunk_type_size(arg_type, 0);
4763     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4764     if (!argptr) {
4765         return -TARGET_EFAULT;
4766     }
4767     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4768     unlock_user(argptr, arg, 0);
4769     fm = (struct fiemap *)buf_temp;
4770     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4771         return -TARGET_EINVAL;
4772     }
4773 
4774     outbufsz = sizeof (*fm) +
4775         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4776 
4777     if (outbufsz > MAX_STRUCT_SIZE) {
4778         /* We can't fit all the extents into the fixed size buffer.
4779          * Allocate one that is large enough and use it instead.
4780          */
4781         fm = g_try_malloc(outbufsz);
4782         if (!fm) {
4783             return -TARGET_ENOMEM;
4784         }
4785         memcpy(fm, buf_temp, sizeof(struct fiemap));
4786         free_fm = 1;
4787     }
4788     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4789     if (!is_error(ret)) {
4790         target_size_out = target_size_in;
4791         /* An extent_count of 0 means we were only counting the extents
4792          * so there are no structs to copy
4793          */
4794         if (fm->fm_extent_count != 0) {
4795             target_size_out += fm->fm_mapped_extents * extent_size;
4796         }
4797         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4798         if (!argptr) {
4799             ret = -TARGET_EFAULT;
4800         } else {
4801             /* Convert the struct fiemap */
4802             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4803             if (fm->fm_extent_count != 0) {
4804                 p = argptr + target_size_in;
4805                 /* ...and then all the struct fiemap_extents */
4806                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4807                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4808                                   THUNK_TARGET);
4809                     p += extent_size;
4810                 }
4811             }
4812             unlock_user(argptr, arg, target_size_out);
4813         }
4814     }
4815     if (free_fm) {
4816         g_free(fm);
4817     }
4818     return ret;
4819 }
4820 #endif
4821 
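/*
 * SIOCGIFCONF: the target struct ifreq can differ in size from the host
 * one, so ifc_len and every element of the embedded ifreq array must be
 * converted, not just the outer struct ifconf.  A NULL ifc_buf is passed
 * through unchanged so the guest can query the required buffer size.
 */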
4822 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4823                                 int fd, int cmd, abi_long arg)
4824 {
4825     const argtype *arg_type = ie->arg_type;
4826     int target_size;
4827     void *argptr;
4828     int ret;
4829     struct ifconf *host_ifconf;
4830     uint32_t outbufsz;
4831     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4832     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4833     int target_ifreq_size;
4834     int nb_ifreq;
4835     int free_buf = 0;
4836     int i;
4837     int target_ifc_len;
4838     abi_long target_ifc_buf;
4839     int host_ifc_len;
4840     char *host_ifc_buf;
4841 
4842     assert(arg_type[0] == TYPE_PTR);
4843     assert(ie->access == IOC_RW);
4844 
4845     arg_type++;
4846     target_size = thunk_type_size(arg_type, 0);
4847 
4848     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4849     if (!argptr)
4850         return -TARGET_EFAULT;
4851     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4852     unlock_user(argptr, arg, 0);
4853 
4854     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4855     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4856     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4857 
4858     if (target_ifc_buf != 0) {
4859         target_ifc_len = host_ifconf->ifc_len;
4860         nb_ifreq = target_ifc_len / target_ifreq_size;
4861         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4862 
4863         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4864         if (outbufsz > MAX_STRUCT_SIZE) {
4865             /*
4866              * We can't fit all the ifreq entries into the fixed size buffer.
4867              * Allocate one that is large enough and use it instead.
4868              */
4869             host_ifconf = g_try_malloc(outbufsz);
4870             if (!host_ifconf) {
4871                 return -TARGET_ENOMEM;
4872             }
4873             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4874             free_buf = 1;
4875         }
4876         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4877 
4878         host_ifconf->ifc_len = host_ifc_len;
4879     } else {
4880         host_ifc_buf = NULL;
4881     }
4882     host_ifconf->ifc_buf = host_ifc_buf;
4883 
4884     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4885     if (!is_error(ret)) {
4886         /* convert host ifc_len to target ifc_len */
4887 
4888         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4889         target_ifc_len = nb_ifreq * target_ifreq_size;
4890         host_ifconf->ifc_len = target_ifc_len;
4891 
4892         /* restore target ifc_buf */
4893 
4894         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4895 
4896         /* copy struct ifconf to target user */
4897 
4898         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4899         if (!argptr)
4900             return -TARGET_EFAULT;
4901         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4902         unlock_user(argptr, arg, target_size);
4903 
4904         if (target_ifc_buf != 0) {
4905             /* copy ifreq[] to target user */
4906             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4907             for (i = 0; i < nb_ifreq ; i++) {
4908                 thunk_convert(argptr + i * target_ifreq_size,
4909                               host_ifc_buf + i * sizeof(struct ifreq),
4910                               ifreq_arg_type, THUNK_TARGET);
4911             }
4912             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4913         }
4914     }
4915 
4916     if (free_buf) {
4917         g_free(host_ifconf);
4918     }
4919 
4920     return ret;
4921 }
4922 
4923 #if defined(CONFIG_USBFS)
4924 #if HOST_LONG_BITS > 64
4925 #error USBDEVFS thunks do not support >64 bit hosts yet.
4926 #endif
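/*
 * Each guest-submitted URB is wrapped in a live_urb so that the guest
 * addresses can be recovered when the URB is reaped or discarded: the
 * kernel is handed the address of host_urb, and reaping gets back to the
 * wrapper via offsetof(struct live_urb, host_urb).
 */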
4927 struct live_urb {
4928     uint64_t target_urb_adr;
4929     uint64_t target_buf_adr;
4930     char *target_buf_ptr;
4931     struct usbdevfs_urb host_urb;
4932 };
4933 
4934 static GHashTable *usbdevfs_urb_hashtable(void)
4935 {
4936     static GHashTable *urb_hashtable;
4937 
4938     if (!urb_hashtable) {
4939         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4940     }
4941     return urb_hashtable;
4942 }
4943 
4944 static void urb_hashtable_insert(struct live_urb *urb)
4945 {
4946     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4947     g_hash_table_insert(urb_hashtable, urb, urb);
4948 }
4949 
4950 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4951 {
4952     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4953     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4954 }
4955 
4956 static void urb_hashtable_remove(struct live_urb *urb)
4957 {
4958     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4959     g_hash_table_remove(urb_hashtable, urb);
4960 }
4961 
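/*
 * USBDEVFS_REAPURB{,NDELAY}: the kernel hands back the pointer to one of
 * our host_urb copies; recover the live_urb wrapper, copy the completed
 * data and URB fields back to guest memory, and report the guest's
 * original URB address as the reaped handle.
 */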
4962 static abi_long
4963 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4964                           int fd, int cmd, abi_long arg)
4965 {
4966     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4967     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4968     struct live_urb *lurb;
4969     void *argptr;
4970     uint64_t hurb;
4971     int target_size;
4972     uintptr_t target_urb_adr;
4973     abi_long ret;
4974 
4975     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4976 
4977     memset(buf_temp, 0, sizeof(uint64_t));
4978     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4979     if (is_error(ret)) {
4980         return ret;
4981     }
4982 
4983     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4984     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4985     if (!lurb->target_urb_adr) {
4986         return -TARGET_EFAULT;
4987     }
4988     urb_hashtable_remove(lurb);
4989     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4990         lurb->host_urb.buffer_length);
4991     lurb->target_buf_ptr = NULL;
4992 
4993     /* restore the guest buffer pointer */
4994     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4995 
4996     /* update the guest urb struct */
4997     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4998     if (!argptr) {
4999         g_free(lurb);
5000         return -TARGET_EFAULT;
5001     }
5002     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5003     unlock_user(argptr, lurb->target_urb_adr, target_size);
5004 
5005     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5006     /* write back the urb handle */
5007     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5008     if (!argptr) {
5009         g_free(lurb);
5010         return -TARGET_EFAULT;
5011     }
5012 
5013     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5014     target_urb_adr = lurb->target_urb_adr;
5015     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5016     unlock_user(argptr, arg, target_size);
5017 
5018     g_free(lurb);
5019     return ret;
5020 }
5021 
5022 static abi_long
5023 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5024                              uint8_t *buf_temp __attribute__((unused)),
5025                              int fd, int cmd, abi_long arg)
5026 {
5027     struct live_urb *lurb;
5028 
5029     /* map target address back to host URB with metadata. */
5030     lurb = urb_hashtable_lookup(arg);
5031     if (!lurb) {
5032         return -TARGET_EFAULT;
5033     }
5034     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5035 }
5036 
5037 static abi_long
5038 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5039                             int fd, int cmd, abi_long arg)
5040 {
5041     const argtype *arg_type = ie->arg_type;
5042     int target_size;
5043     abi_long ret;
5044     void *argptr;
5045     int rw_dir;
5046     struct live_urb *lurb;
5047 
5048     /*
5049      * Each submitted URB needs to map to a unique ID for the
5050      * kernel, and that unique ID needs to be a pointer to
5051      * host memory.  Hence, we need to malloc for each URB.
5052      * Isochronous transfers have a variable length struct.
5053      */
5054     arg_type++;
5055     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5056 
5057     /* construct host copy of urb and metadata */
5058     lurb = g_try_new0(struct live_urb, 1);
5059     if (!lurb) {
5060         return -TARGET_ENOMEM;
5061     }
5062 
5063     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5064     if (!argptr) {
5065         g_free(lurb);
5066         return -TARGET_EFAULT;
5067     }
5068     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5069     unlock_user(argptr, arg, 0);
5070 
5071     lurb->target_urb_adr = arg;
5072     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5073 
5074     /* buffer space used depends on endpoint type so lock the entire buffer */
5075     /* control type urbs should check the buffer contents for true direction */
5076     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5077     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5078         lurb->host_urb.buffer_length, 1);
5079     if (lurb->target_buf_ptr == NULL) {
5080         g_free(lurb);
5081         return -TARGET_EFAULT;
5082     }
5083 
5084     /* update buffer pointer in host copy */
5085     lurb->host_urb.buffer = lurb->target_buf_ptr;
5086 
5087     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5088     if (is_error(ret)) {
5089         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5090         g_free(lurb);
5091     } else {
5092         urb_hashtable_insert(lurb);
5093     }
5094 
5095     return ret;
5096 }
5097 #endif /* CONFIG_USBFS */
5098 
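/*
 * Device-mapper ioctls carry a variable-sized payload after struct
 * dm_ioctl, described by its data_start/data_size fields.  The fixed
 * buf_temp may be too small for it, so copy everything into a larger
 * temporary buffer and convert the payload according to the DM_* command.
 */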
5099 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5100                             int cmd, abi_long arg)
5101 {
5102     void *argptr;
5103     struct dm_ioctl *host_dm;
5104     abi_long guest_data;
5105     uint32_t guest_data_size;
5106     int target_size;
5107     const argtype *arg_type = ie->arg_type;
5108     abi_long ret;
5109     void *big_buf = NULL;
5110     char *host_data;
5111 
5112     arg_type++;
5113     target_size = thunk_type_size(arg_type, 0);
5114     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5115     if (!argptr) {
5116         ret = -TARGET_EFAULT;
5117         goto out;
5118     }
5119     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5120     unlock_user(argptr, arg, 0);
5121 
5122     /* buf_temp is too small, so fetch things into a bigger buffer */
5123     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5124     memcpy(big_buf, buf_temp, target_size);
5125     buf_temp = big_buf;
5126     host_dm = big_buf;
5127 
5128     guest_data = arg + host_dm->data_start;
5129     if ((guest_data - arg) < 0) {
5130         ret = -TARGET_EINVAL;
5131         goto out;
5132     }
5133     guest_data_size = host_dm->data_size - host_dm->data_start;
5134     host_data = (char*)host_dm + host_dm->data_start;
5135 
5136     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5137     if (!argptr) {
5138         ret = -TARGET_EFAULT;
5139         goto out;
5140     }
5141 
5142     switch (ie->host_cmd) {
5143     case DM_REMOVE_ALL:
5144     case DM_LIST_DEVICES:
5145     case DM_DEV_CREATE:
5146     case DM_DEV_REMOVE:
5147     case DM_DEV_SUSPEND:
5148     case DM_DEV_STATUS:
5149     case DM_DEV_WAIT:
5150     case DM_TABLE_STATUS:
5151     case DM_TABLE_CLEAR:
5152     case DM_TABLE_DEPS:
5153     case DM_LIST_VERSIONS:
5154         /* no input data */
5155         break;
5156     case DM_DEV_RENAME:
5157     case DM_DEV_SET_GEOMETRY:
5158         /* data contains only strings */
5159         memcpy(host_data, argptr, guest_data_size);
5160         break;
5161     case DM_TARGET_MSG:
5162         memcpy(host_data, argptr, guest_data_size);
5163         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5164         break;
5165     case DM_TABLE_LOAD:
5166     {
5167         void *gspec = argptr;
5168         void *cur_data = host_data;
5169         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5170         int spec_size = thunk_type_size(arg_type, 0);
5171         int i;
5172 
5173         for (i = 0; i < host_dm->target_count; i++) {
5174             struct dm_target_spec *spec = cur_data;
5175             uint32_t next;
5176             int slen;
5177 
5178             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5179             slen = strlen((char*)gspec + spec_size) + 1;
5180             next = spec->next;
5181             spec->next = sizeof(*spec) + slen;
5182             strcpy((char*)&spec[1], gspec + spec_size);
5183             gspec += next;
5184             cur_data += spec->next;
5185         }
5186         break;
5187     }
5188     default:
5189         ret = -TARGET_EINVAL;
5190         unlock_user(argptr, guest_data, 0);
5191         goto out;
5192     }
5193     unlock_user(argptr, guest_data, 0);
5194 
5195     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5196     if (!is_error(ret)) {
5197         guest_data = arg + host_dm->data_start;
5198         guest_data_size = host_dm->data_size - host_dm->data_start;
5199         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5200         switch (ie->host_cmd) {
5201         case DM_REMOVE_ALL:
5202         case DM_DEV_CREATE:
5203         case DM_DEV_REMOVE:
5204         case DM_DEV_RENAME:
5205         case DM_DEV_SUSPEND:
5206         case DM_DEV_STATUS:
5207         case DM_TABLE_LOAD:
5208         case DM_TABLE_CLEAR:
5209         case DM_TARGET_MSG:
5210         case DM_DEV_SET_GEOMETRY:
5211             /* no return data */
5212             break;
5213         case DM_LIST_DEVICES:
5214         {
5215             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5216             uint32_t remaining_data = guest_data_size;
5217             void *cur_data = argptr;
5218             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5219             int nl_size = 12; /* can't use thunk_size due to alignment */
5220 
5221             while (1) {
5222                 uint32_t next = nl->next;
5223                 if (next) {
5224                     nl->next = nl_size + (strlen(nl->name) + 1);
5225                 }
5226                 if (remaining_data < nl->next) {
5227                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5228                     break;
5229                 }
5230                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5231                 strcpy(cur_data + nl_size, nl->name);
5232                 cur_data += nl->next;
5233                 remaining_data -= nl->next;
5234                 if (!next) {
5235                     break;
5236                 }
5237                 nl = (void*)nl + next;
5238             }
5239             break;
5240         }
5241         case DM_DEV_WAIT:
5242         case DM_TABLE_STATUS:
5243         {
5244             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5245             void *cur_data = argptr;
5246             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5247             int spec_size = thunk_type_size(arg_type, 0);
5248             int i;
5249 
5250             for (i = 0; i < host_dm->target_count; i++) {
5251                 uint32_t next = spec->next;
5252                 int slen = strlen((char*)&spec[1]) + 1;
5253                 spec->next = (cur_data - argptr) + spec_size + slen;
5254                 if (guest_data_size < spec->next) {
5255                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5256                     break;
5257                 }
5258                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5259                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5260                 cur_data = argptr + spec->next;
5261                 spec = (void*)host_dm + host_dm->data_start + next;
5262             }
5263             break;
5264         }
5265         case DM_TABLE_DEPS:
5266         {
5267             void *hdata = (void*)host_dm + host_dm->data_start;
5268             int count = *(uint32_t*)hdata;
5269             uint64_t *hdev = hdata + 8;
5270             uint64_t *gdev = argptr + 8;
5271             int i;
5272 
5273             *(uint32_t*)argptr = tswap32(count);
5274             for (i = 0; i < count; i++) {
5275                 *gdev = tswap64(*hdev);
5276                 gdev++;
5277                 hdev++;
5278             }
5279             break;
5280         }
5281         case DM_LIST_VERSIONS:
5282         {
5283             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5284             uint32_t remaining_data = guest_data_size;
5285             void *cur_data = argptr;
5286             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5287             int vers_size = thunk_type_size(arg_type, 0);
5288 
5289             while (1) {
5290                 uint32_t next = vers->next;
5291                 if (next) {
5292                     vers->next = vers_size + (strlen(vers->name) + 1);
5293                 }
5294                 if (remaining_data < vers->next) {
5295                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5296                     break;
5297                 }
5298                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5299                 strcpy(cur_data + vers_size, vers->name);
5300                 cur_data += vers->next;
5301                 remaining_data -= vers->next;
5302                 if (!next) {
5303                     break;
5304                 }
5305                 vers = (void*)vers + next;
5306             }
5307             break;
5308         }
5309         default:
5310             unlock_user(argptr, guest_data, 0);
5311             ret = -TARGET_EINVAL;
5312             goto out;
5313         }
5314         unlock_user(argptr, guest_data, guest_data_size);
5315 
5316         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5317         if (!argptr) {
5318             ret = -TARGET_EFAULT;
5319             goto out;
5320         }
5321         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5322         unlock_user(argptr, arg, target_size);
5323     }
5324 out:
5325     g_free(big_buf);
5326     return ret;
5327 }
5328 
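/*
 * BLKPG: struct blkpg_ioctl_arg contains a pointer to a struct
 * blkpg_partition payload.  Convert both structures and point the host
 * copy's data field at our local partition before issuing the ioctl.
 */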
5329 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5330                                int cmd, abi_long arg)
5331 {
5332     void *argptr;
5333     int target_size;
5334     const argtype *arg_type = ie->arg_type;
5335     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5336     abi_long ret;
5337 
5338     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5339     struct blkpg_partition host_part;
5340 
5341     /* Read and convert blkpg */
5342     arg_type++;
5343     target_size = thunk_type_size(arg_type, 0);
5344     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5345     if (!argptr) {
5346         ret = -TARGET_EFAULT;
5347         goto out;
5348     }
5349     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5350     unlock_user(argptr, arg, 0);
5351 
5352     switch (host_blkpg->op) {
5353     case BLKPG_ADD_PARTITION:
5354     case BLKPG_DEL_PARTITION:
5355         /* payload is struct blkpg_partition */
5356         break;
5357     default:
5358         /* Unknown opcode */
5359         ret = -TARGET_EINVAL;
5360         goto out;
5361     }
5362 
5363     /* Read and convert blkpg->data */
5364     arg = (abi_long)(uintptr_t)host_blkpg->data;
5365     target_size = thunk_type_size(part_arg_type, 0);
5366     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5367     if (!argptr) {
5368         ret = -TARGET_EFAULT;
5369         goto out;
5370     }
5371     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5372     unlock_user(argptr, arg, 0);
5373 
5374     /* Swizzle the data pointer to our local copy and call! */
5375     host_blkpg->data = &host_part;
5376     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5377 
5378 out:
5379     return ret;
5380 }
5381 
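/*
 * SIOCADDRT/SIOCDELRT: struct rtentry contains an rt_dev pointer to a
 * device name string in guest memory.  Convert the struct field by field
 * so that the string can be locked and replaced with a host pointer.
 */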
5382 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5383                                 int fd, int cmd, abi_long arg)
5384 {
5385     const argtype *arg_type = ie->arg_type;
5386     const StructEntry *se;
5387     const argtype *field_types;
5388     const int *dst_offsets, *src_offsets;
5389     int target_size;
5390     void *argptr;
5391     abi_ulong *target_rt_dev_ptr = NULL;
5392     unsigned long *host_rt_dev_ptr = NULL;
5393     abi_long ret;
5394     int i;
5395 
5396     assert(ie->access == IOC_W);
5397     assert(*arg_type == TYPE_PTR);
5398     arg_type++;
5399     assert(*arg_type == TYPE_STRUCT);
5400     target_size = thunk_type_size(arg_type, 0);
5401     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5402     if (!argptr) {
5403         return -TARGET_EFAULT;
5404     }
5405     arg_type++;
5406     assert(*arg_type == (int)STRUCT_rtentry);
5407     se = struct_entries + *arg_type++;
5408     assert(se->convert[0] == NULL);
5409     /* convert the struct here so that we can catch the rt_dev string */
5410     field_types = se->field_types;
5411     dst_offsets = se->field_offsets[THUNK_HOST];
5412     src_offsets = se->field_offsets[THUNK_TARGET];
5413     for (i = 0; i < se->nb_fields; i++) {
5414         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5415             assert(*field_types == TYPE_PTRVOID);
5416             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5417             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5418             if (*target_rt_dev_ptr != 0) {
5419                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5420                                                   tswapal(*target_rt_dev_ptr));
5421                 if (!*host_rt_dev_ptr) {
5422                     unlock_user(argptr, arg, 0);
5423                     return -TARGET_EFAULT;
5424                 }
5425             } else {
5426                 *host_rt_dev_ptr = 0;
5427             }
5428             field_types++;
5429             continue;
5430         }
5431         field_types = thunk_convert(buf_temp + dst_offsets[i],
5432                                     argptr + src_offsets[i],
5433                                     field_types, THUNK_HOST);
5434     }
5435     unlock_user(argptr, arg, 0);
5436 
5437     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5438 
5439     assert(host_rt_dev_ptr != NULL);
5440     assert(target_rt_dev_ptr != NULL);
5441     if (*host_rt_dev_ptr != 0) {
5442         unlock_user((void *)*host_rt_dev_ptr,
5443                     *target_rt_dev_ptr, 0);
5444     }
5445     return ret;
5446 }
5447 
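/* KDSIGACCEPT takes a signal number, which must be mapped to the host numbering. */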
5448 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5449                                      int fd, int cmd, abi_long arg)
5450 {
5451     int sig = target_to_host_signal(arg);
5452     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5453 }
5454 
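/*
 * SIOCGSTAMP and SIOCGSTAMPNS each have an _OLD (legacy struct
 * timeval/timespec) and a _NEW (explicit 64-bit) target variant; fetch
 * the host timestamp and copy it out in whichever format the guest asked
 * for.
 */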
5455 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5456                                     int fd, int cmd, abi_long arg)
5457 {
5458     struct timeval tv;
5459     abi_long ret;
5460 
5461     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5462     if (is_error(ret)) {
5463         return ret;
5464     }
5465 
5466     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5467         if (copy_to_user_timeval(arg, &tv)) {
5468             return -TARGET_EFAULT;
5469         }
5470     } else {
5471         if (copy_to_user_timeval64(arg, &tv)) {
5472             return -TARGET_EFAULT;
5473         }
5474     }
5475 
5476     return ret;
5477 }
5478 
5479 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5480                                       int fd, int cmd, abi_long arg)
5481 {
5482     struct timespec ts;
5483     abi_long ret;
5484 
5485     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5486     if (is_error(ret)) {
5487         return ret;
5488     }
5489 
5490     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5491         if (host_to_target_timespec(arg, &ts)) {
5492             return -TARGET_EFAULT;
5493         }
5494     } else {
5495         if (host_to_target_timespec64(arg, &ts)) {
5496             return -TARGET_EFAULT;
5497         }
5498     }
5499 
5500     return ret;
5501 }
5502 
5503 #ifdef TIOCGPTPEER
5504 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5505                                      int fd, int cmd, abi_long arg)
5506 {
5507     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5508     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5509 }
5510 #endif
5511 
5512 #ifdef HAVE_DRM_H
5513 
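/*
 * DRM_IOCTL_VERSION returns three variable-length strings (name, date,
 * desc) through caller-supplied buffers.  Lock the guest buffers so the
 * kernel writes into them directly, then copy the lengths back.
 */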
5514 static void unlock_drm_version(struct drm_version *host_ver,
5515                                struct target_drm_version *target_ver,
5516                                bool copy)
5517 {
5518     unlock_user(host_ver->name, target_ver->name,
5519                                 copy ? host_ver->name_len : 0);
5520     unlock_user(host_ver->date, target_ver->date,
5521                                 copy ? host_ver->date_len : 0);
5522     unlock_user(host_ver->desc, target_ver->desc,
5523                                 copy ? host_ver->desc_len : 0);
5524 }
5525 
5526 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5527                                           struct target_drm_version *target_ver)
5528 {
5529     memset(host_ver, 0, sizeof(*host_ver));
5530 
5531     __get_user(host_ver->name_len, &target_ver->name_len);
5532     if (host_ver->name_len) {
5533         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5534                                    target_ver->name_len, 0);
5535         if (!host_ver->name) {
5536             return -EFAULT;
5537         }
5538     }
5539 
5540     __get_user(host_ver->date_len, &target_ver->date_len);
5541     if (host_ver->date_len) {
5542         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5543                                    target_ver->date_len, 0);
5544         if (!host_ver->date) {
5545             goto err;
5546         }
5547     }
5548 
5549     __get_user(host_ver->desc_len, &target_ver->desc_len);
5550     if (host_ver->desc_len) {
5551         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5552                                    target_ver->desc_len, 0);
5553         if (!host_ver->desc) {
5554             goto err;
5555         }
5556     }
5557 
5558     return 0;
5559 err:
5560     unlock_drm_version(host_ver, target_ver, false);
5561     return -EFAULT;
5562 }
5563 
5564 static inline void host_to_target_drmversion(
5565                                           struct target_drm_version *target_ver,
5566                                           struct drm_version *host_ver)
5567 {
5568     __put_user(host_ver->version_major, &target_ver->version_major);
5569     __put_user(host_ver->version_minor, &target_ver->version_minor);
5570     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5571     __put_user(host_ver->name_len, &target_ver->name_len);
5572     __put_user(host_ver->date_len, &target_ver->date_len);
5573     __put_user(host_ver->desc_len, &target_ver->desc_len);
5574     unlock_drm_version(host_ver, target_ver, true);
5575 }
5576 
5577 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5578                              int fd, int cmd, abi_long arg)
5579 {
5580     struct drm_version *ver;
5581     struct target_drm_version *target_ver;
5582     abi_long ret;
5583 
5584     switch (ie->host_cmd) {
5585     case DRM_IOCTL_VERSION:
5586         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5587             return -TARGET_EFAULT;
5588         }
5589         ver = (struct drm_version *)buf_temp;
5590         ret = target_to_host_drmversion(ver, target_ver);
5591         if (!is_error(ret)) {
5592             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5593             if (is_error(ret)) {
5594                 unlock_drm_version(ver, target_ver, false);
5595             } else {
5596                 host_to_target_drmversion(target_ver, ver);
5597             }
5598         }
5599         unlock_user_struct(target_ver, arg, 0);
5600         return ret;
5601     }
5602     return -TARGET_ENOSYS;
5603 }
5604 
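/*
 * DRM_IOCTL_I915_GETPARAM writes an int through a user pointer; supply a
 * host int and copy the result back to the guest's value pointer.
 */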
5605 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5606                                            struct drm_i915_getparam *gparam,
5607                                            int fd, abi_long arg)
5608 {
5609     abi_long ret;
5610     int value;
5611     struct target_drm_i915_getparam *target_gparam;
5612 
5613     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5614         return -TARGET_EFAULT;
5615     }
5616 
5617     __get_user(gparam->param, &target_gparam->param);
5618     gparam->value = &value;
5619     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5620     put_user_s32(value, target_gparam->value);
5621 
5622     unlock_user_struct(target_gparam, arg, 0);
5623     return ret;
5624 }
5625 
5626 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5627                                   int fd, int cmd, abi_long arg)
5628 {
5629     switch (ie->host_cmd) {
5630     case DRM_IOCTL_I915_GETPARAM:
5631         return do_ioctl_drm_i915_getparam(ie,
5632                                           (struct drm_i915_getparam *)buf_temp,
5633                                           fd, arg);
5634     default:
5635         return -TARGET_ENOSYS;
5636     }
5637 }
5638 
5639 #endif
5640 
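/*
 * TUNSETTXFILTER passes a variable-length struct tun_filter with
 * filter->count MAC addresses appended; byteswap the fixed header and
 * copy the address list verbatim.
 */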
5641 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5642                                         int fd, int cmd, abi_long arg)
5643 {
5644     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5645     struct tun_filter *target_filter;
5646     char *target_addr;
5647 
5648     assert(ie->access == IOC_W);
5649 
5650     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5651     if (!target_filter) {
5652         return -TARGET_EFAULT;
5653     }
5654     filter->flags = tswap16(target_filter->flags);
5655     filter->count = tswap16(target_filter->count);
5656     unlock_user(target_filter, arg, 0);
5657 
5658     if (filter->count) {
5659         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5660             MAX_STRUCT_SIZE) {
5661             return -TARGET_EFAULT;
5662         }
5663 
5664         target_addr = lock_user(VERIFY_READ,
5665                                 arg + offsetof(struct tun_filter, addr),
5666                                 filter->count * ETH_ALEN, 1);
5667         if (!target_addr) {
5668             return -TARGET_EFAULT;
5669         }
5670         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5671         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5672     }
5673 
5674     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5675 }
5676 
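/*
 * Table of target ioctls we know how to forward, generated from ioctls.h.
 * IOCTL_SPECIAL entries use one of the custom do_ioctl_*() converters
 * above, plain IOCTL entries rely on the generic thunk conversion in
 * do_ioctl(), and IOCTL_IGNORE entries have no host counterpart and are
 * reported as unimplemented.
 */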
5677 IOCTLEntry ioctl_entries[] = {
5678 #define IOCTL(cmd, access, ...) \
5679     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5680 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5681     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5682 #define IOCTL_IGNORE(cmd) \
5683     { TARGET_ ## cmd, 0, #cmd },
5684 #include "ioctls.h"
5685     { 0, 0, },
5686 };
5687 
5688 /* ??? Implement proper locking for ioctls.  */
5689 /* do_ioctl() must return target values and target errnos. */
5690 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5691 {
5692     const IOCTLEntry *ie;
5693     const argtype *arg_type;
5694     abi_long ret;
5695     uint8_t buf_temp[MAX_STRUCT_SIZE];
5696     int target_size;
5697     void *argptr;
5698 
5699     ie = ioctl_entries;
5700     for (;;) {
5701         if (ie->target_cmd == 0) {
5702             qemu_log_mask(
5703                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5704             return -TARGET_ENOSYS;
5705         }
5706         if (ie->target_cmd == cmd)
5707             break;
5708         ie++;
5709     }
5710     arg_type = ie->arg_type;
5711     if (ie->do_ioctl) {
5712         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5713     } else if (!ie->host_cmd) {
5714         /* Some architectures define BSD ioctls in their headers
5715            that are not implemented in Linux.  */
5716         return -TARGET_ENOSYS;
5717     }
5718 
5719     switch (arg_type[0]) {
5720     case TYPE_NULL:
5721         /* no argument */
5722         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5723         break;
5724     case TYPE_PTRVOID:
5725     case TYPE_INT:
5726     case TYPE_LONG:
5727     case TYPE_ULONG:
5728         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5729         break;
5730     case TYPE_PTR:
5731         arg_type++;
5732         target_size = thunk_type_size(arg_type, 0);
5733         switch (ie->access) {
5734         case IOC_R:
5735             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5736             if (!is_error(ret)) {
5737                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5738                 if (!argptr)
5739                     return -TARGET_EFAULT;
5740                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5741                 unlock_user(argptr, arg, target_size);
5742             }
5743             break;
5744         case IOC_W:
5745             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5746             if (!argptr)
5747                 return -TARGET_EFAULT;
5748             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5749             unlock_user(argptr, arg, 0);
5750             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5751             break;
5752         default:
5753         case IOC_RW:
5754             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5755             if (!argptr)
5756                 return -TARGET_EFAULT;
5757             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5758             unlock_user(argptr, arg, 0);
5759             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5760             if (!is_error(ret)) {
5761                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5762                 if (!argptr)
5763                     return -TARGET_EFAULT;
5764                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5765                 unlock_user(argptr, arg, target_size);
5766             }
5767             break;
5768         }
5769         break;
5770     default:
5771         qemu_log_mask(LOG_UNIMP,
5772                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5773                       (long)cmd, arg_type[0]);
5774         ret = -TARGET_ENOSYS;
5775         break;
5776     }
5777     return ret;
5778 }
5779 
5780 static const bitmask_transtbl iflag_tbl[] = {
5781         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5782         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5783         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5784         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5785         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5786         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5787         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5788         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5789         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5790         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5791         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5792         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5793         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5794         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5795         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5796         { 0, 0, 0, 0 }
5797 };
5798 
5799 static const bitmask_transtbl oflag_tbl[] = {
5800 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5801 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5802 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5803 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5804 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5805 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5806 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5807 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5808 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5809 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5810 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5811 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5812 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5813 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5814 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5815 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5816 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5817 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5818 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5819 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5820 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5821 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5822 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5823 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5824 	{ 0, 0, 0, 0 }
5825 };
5826 
5827 static const bitmask_transtbl cflag_tbl[] = {
5828 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5829 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5830 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5831 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5832 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5833 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5834 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5835 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5836 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5837 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5838 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5839 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5840 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5841 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5842 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5843 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5844 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5845 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5846 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5847 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5848 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5849 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5850 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5851 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5852 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5853 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5854 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5855 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5856 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5857 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5858 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5859 	{ 0, 0, 0, 0 }
5860 };
5861 
5862 static const bitmask_transtbl lflag_tbl[] = {
5863   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5864   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5865   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5866   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5867   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5868   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5869   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5870   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5871   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5872   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5873   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5874   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5875   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5876   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5877   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5878   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5879   { 0, 0, 0, 0 }
5880 };
5881 
5882 static void target_to_host_termios (void *dst, const void *src)
5883 {
5884     struct host_termios *host = dst;
5885     const struct target_termios *target = src;
5886 
5887     host->c_iflag =
5888         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5889     host->c_oflag =
5890         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5891     host->c_cflag =
5892         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5893     host->c_lflag =
5894         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5895     host->c_line = target->c_line;
5896 
5897     memset(host->c_cc, 0, sizeof(host->c_cc));
5898     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5899     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5900     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5901     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5902     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5903     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5904     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5905     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5906     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5907     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5908     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5909     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5910     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5911     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5912     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5913     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5914     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5915 }
5916 
5917 static void host_to_target_termios (void *dst, const void *src)
5918 {
5919     struct target_termios *target = dst;
5920     const struct host_termios *host = src;
5921 
5922     target->c_iflag =
5923         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5924     target->c_oflag =
5925         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5926     target->c_cflag =
5927         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5928     target->c_lflag =
5929         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5930     target->c_line = host->c_line;
5931 
5932     memset(target->c_cc, 0, sizeof(target->c_cc));
5933     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5934     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5935     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5936     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5937     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5938     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5939     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5940     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5941     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5942     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5943     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5944     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5945     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5946     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5947     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5948     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5949     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5950 }
5951 
5952 static const StructEntry struct_termios_def = {
5953     .convert = { host_to_target_termios, target_to_host_termios },
5954     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5955     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5956     .print = print_termios,
5957 };
5958 
5959 static const bitmask_transtbl mmap_flags_tbl[] = {
5960     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5961     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5962     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5963     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5964       MAP_ANONYMOUS, MAP_ANONYMOUS },
5965     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5966       MAP_GROWSDOWN, MAP_GROWSDOWN },
5967     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5968       MAP_DENYWRITE, MAP_DENYWRITE },
5969     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5970       MAP_EXECUTABLE, MAP_EXECUTABLE },
5971     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5972     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5973       MAP_NORESERVE, MAP_NORESERVE },
5974     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5975     /* MAP_STACK had been ignored by the kernel for quite some time.
5976        Recognize it for the target insofar as we do not want to pass
5977        it through to the host.  */
5978     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5979     { 0, 0, 0, 0 }
5980 };
5981 
5982 /*
5983  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5984  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5985  */
5986 #if defined(TARGET_I386)
5987 
5988 /* NOTE: there is really one LDT for all the threads */
5989 static uint8_t *ldt_table;
5990 
5991 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5992 {
5993     int size;
5994     void *p;
5995 
5996     if (!ldt_table)
5997         return 0;
5998     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5999     if (size > bytecount)
6000         size = bytecount;
6001     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6002     if (!p)
6003         return -TARGET_EFAULT;
6004     /* ??? Should this be byteswapped?  */
6005     memcpy(p, ldt_table, size);
6006     unlock_user(p, ptr, size);
6007     return size;
6008 }
6009 
6010 /* XXX: add locking support */
6011 static abi_long write_ldt(CPUX86State *env,
6012                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6013 {
6014     struct target_modify_ldt_ldt_s ldt_info;
6015     struct target_modify_ldt_ldt_s *target_ldt_info;
6016     int seg_32bit, contents, read_exec_only, limit_in_pages;
6017     int seg_not_present, useable, lm;
6018     uint32_t *lp, entry_1, entry_2;
6019 
6020     if (bytecount != sizeof(ldt_info))
6021         return -TARGET_EINVAL;
6022     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6023         return -TARGET_EFAULT;
6024     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6025     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6026     ldt_info.limit = tswap32(target_ldt_info->limit);
6027     ldt_info.flags = tswap32(target_ldt_info->flags);
6028     unlock_user_struct(target_ldt_info, ptr, 0);
6029 
6030     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6031         return -TARGET_EINVAL;
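    /* Unpack the flags word (the packed user_desc bitfields). */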
6032     seg_32bit = ldt_info.flags & 1;
6033     contents = (ldt_info.flags >> 1) & 3;
6034     read_exec_only = (ldt_info.flags >> 3) & 1;
6035     limit_in_pages = (ldt_info.flags >> 4) & 1;
6036     seg_not_present = (ldt_info.flags >> 5) & 1;
6037     useable = (ldt_info.flags >> 6) & 1;
6038 #ifdef TARGET_ABI32
6039     lm = 0;
6040 #else
6041     lm = (ldt_info.flags >> 7) & 1;
6042 #endif
6043     if (contents == 3) {
6044         if (oldmode)
6045             return -TARGET_EINVAL;
6046         if (seg_not_present == 0)
6047             return -TARGET_EINVAL;
6048     }
6049     /* allocate the LDT */
6050     if (!ldt_table) {
6051         env->ldt.base = target_mmap(0,
6052                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6053                                     PROT_READ|PROT_WRITE,
6054                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6055         if (env->ldt.base == -1)
6056             return -TARGET_ENOMEM;
6057         memset(g2h_untagged(env->ldt.base), 0,
6058                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6059         env->ldt.limit = 0xffff;
6060         ldt_table = g2h_untagged(env->ldt.base);
6061     }
6062 
6063     /* NOTE: same code as Linux kernel */
6064     /* Allow LDTs to be cleared by the user. */
6065     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6066         if (oldmode ||
6067             (contents == 0             &&
6068              read_exec_only == 1       &&
6069              seg_32bit == 0            &&
6070              limit_in_pages == 0       &&
6071              seg_not_present == 1      &&
6072              useable == 0 )) {
6073             entry_1 = 0;
6074             entry_2 = 0;
6075             goto install;
6076         }
6077     }
6078 
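    /* Assemble the two 32-bit words of the hardware segment descriptor. */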
6079     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6080         (ldt_info.limit & 0x0ffff);
6081     entry_2 = (ldt_info.base_addr & 0xff000000) |
6082         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6083         (ldt_info.limit & 0xf0000) |
6084         ((read_exec_only ^ 1) << 9) |
6085         (contents << 10) |
6086         ((seg_not_present ^ 1) << 15) |
6087         (seg_32bit << 22) |
6088         (limit_in_pages << 23) |
6089         (lm << 21) |
6090         0x7000;
6091     if (!oldmode)
6092         entry_2 |= (useable << 20);
6093 
6094     /* Install the new entry ...  */
6095 install:
6096     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6097     lp[0] = tswap32(entry_1);
6098     lp[1] = tswap32(entry_2);
6099     return 0;
6100 }
6101 
6102 /* specific and weird i386 syscalls */
6103 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6104                               unsigned long bytecount)
6105 {
6106     abi_long ret;
6107 
6108     switch (func) {
6109     case 0:
6110         ret = read_ldt(ptr, bytecount);
6111         break;
6112     case 1:
6113         ret = write_ldt(env, ptr, bytecount, 1);
6114         break;
6115     case 0x11:
6116         ret = write_ldt(env, ptr, bytecount, 0);
6117         break;
6118     default:
6119         ret = -TARGET_ENOSYS;
6120         break;
6121     }
6122     return ret;
6123 }
6124 
6125 #if defined(TARGET_ABI32)
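/*
 * set_thread_area: install a TLS descriptor in the GDT.  An entry_number
 * of -1 asks us to pick a free slot in the TLS range and report the
 * chosen slot back to the guest.
 */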
6126 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6127 {
6128     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6129     struct target_modify_ldt_ldt_s ldt_info;
6130     struct target_modify_ldt_ldt_s *target_ldt_info;
6131     int seg_32bit, contents, read_exec_only, limit_in_pages;
6132     int seg_not_present, useable, lm;
6133     uint32_t *lp, entry_1, entry_2;
6134     int i;
6135 
6136     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6137     if (!target_ldt_info)
6138         return -TARGET_EFAULT;
6139     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6140     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6141     ldt_info.limit = tswap32(target_ldt_info->limit);
6142     ldt_info.flags = tswap32(target_ldt_info->flags);
6143     if (ldt_info.entry_number == -1) {
6144         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6145             if (gdt_table[i] == 0) {
6146                 ldt_info.entry_number = i;
6147                 target_ldt_info->entry_number = tswap32(i);
6148                 break;
6149             }
6150         }
6151     }
6152     unlock_user_struct(target_ldt_info, ptr, 1);
6153 
6154     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6155         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6156            return -TARGET_EINVAL;
6157     seg_32bit = ldt_info.flags & 1;
6158     contents = (ldt_info.flags >> 1) & 3;
6159     read_exec_only = (ldt_info.flags >> 3) & 1;
6160     limit_in_pages = (ldt_info.flags >> 4) & 1;
6161     seg_not_present = (ldt_info.flags >> 5) & 1;
6162     useable = (ldt_info.flags >> 6) & 1;
6163 #ifdef TARGET_ABI32
6164     lm = 0;
6165 #else
6166     lm = (ldt_info.flags >> 7) & 1;
6167 #endif
6168 
6169     if (contents == 3) {
6170         if (seg_not_present == 0)
6171             return -TARGET_EINVAL;
6172     }
6173 
6174     /* NOTE: same code as Linux kernel */
6175     /* Allow LDTs to be cleared by the user. */
6176     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6177         if ((contents == 0             &&
6178              read_exec_only == 1       &&
6179              seg_32bit == 0            &&
6180              limit_in_pages == 0       &&
6181              seg_not_present == 1      &&
6182              useable == 0 )) {
6183             entry_1 = 0;
6184             entry_2 = 0;
6185             goto install;
6186         }
6187     }
6188 
6189     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6190         (ldt_info.limit & 0x0ffff);
6191     entry_2 = (ldt_info.base_addr & 0xff000000) |
6192         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6193         (ldt_info.limit & 0xf0000) |
6194         ((read_exec_only ^ 1) << 9) |
6195         (contents << 10) |
6196         ((seg_not_present ^ 1) << 15) |
6197         (seg_32bit << 22) |
6198         (limit_in_pages << 23) |
6199         (useable << 20) |
6200         (lm << 21) |
6201         0x7000;
6202 
6203     /* Install the new entry ...  */
6204 install:
6205     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6206     lp[0] = tswap32(entry_1);
6207     lp[1] = tswap32(entry_2);
6208     return 0;
6209 }
6210 
6211 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6212 {
6213     struct target_modify_ldt_ldt_s *target_ldt_info;
6214     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6215     uint32_t base_addr, limit, flags;
6216     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6217     int seg_not_present, useable, lm;
6218     uint32_t *lp, entry_1, entry_2;
6219 
6220     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6221     if (!target_ldt_info)
6222         return -TARGET_EFAULT;
6223     idx = tswap32(target_ldt_info->entry_number);
6224     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6225         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6226         unlock_user_struct(target_ldt_info, ptr, 1);
6227         return -TARGET_EINVAL;
6228     }
6229     lp = (uint32_t *)(gdt_table + idx);
6230     entry_1 = tswap32(lp[0]);
6231     entry_2 = tswap32(lp[1]);
6232 
6233     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6234     contents = (entry_2 >> 10) & 3;
6235     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6236     seg_32bit = (entry_2 >> 22) & 1;
6237     limit_in_pages = (entry_2 >> 23) & 1;
6238     useable = (entry_2 >> 20) & 1;
6239 #ifdef TARGET_ABI32
6240     lm = 0;
6241 #else
6242     lm = (entry_2 >> 21) & 1;
6243 #endif
6244     flags = (seg_32bit << 0) | (contents << 1) |
6245         (read_exec_only << 3) | (limit_in_pages << 4) |
6246         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6247     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6248     base_addr = (entry_1 >> 16) |
6249         (entry_2 & 0xff000000) |
6250         ((entry_2 & 0xff) << 16);
6251     target_ldt_info->base_addr = tswapal(base_addr);
6252     target_ldt_info->limit = tswap32(limit);
6253     target_ldt_info->flags = tswap32(flags);
6254     unlock_user_struct(target_ldt_info, ptr, 1);
6255     return 0;
6256 }
6257 
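/*
 * arch_prctl() only manipulates the 64-bit FS/GS base addresses, so the
 * 32-bit ABI variant simply fails with ENOSYS.
 */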
6258 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6259 {
6260     return -TARGET_ENOSYS;
6261 }
6262 #else
6263 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6264 {
6265     abi_long ret = 0;
6266     abi_ulong val;
6267     int idx;
6268 
6269     switch(code) {
6270     case TARGET_ARCH_SET_GS:
6271     case TARGET_ARCH_SET_FS:
6272         if (code == TARGET_ARCH_SET_GS)
6273             idx = R_GS;
6274         else
6275             idx = R_FS;
6276         cpu_x86_load_seg(env, idx, 0);
6277         env->segs[idx].base = addr;
6278         break;
6279     case TARGET_ARCH_GET_GS:
6280     case TARGET_ARCH_GET_FS:
6281         if (code == TARGET_ARCH_GET_GS)
6282             idx = R_GS;
6283         else
6284             idx = R_FS;
6285         val = env->segs[idx].base;
6286         if (put_user(val, addr, abi_ulong))
6287             ret = -TARGET_EFAULT;
6288         break;
6289     default:
6290         ret = -TARGET_EINVAL;
6291         break;
6292     }
6293     return ret;
6294 }
6295 #endif /* defined(TARGET_ABI32) */
6296 #endif /* defined(TARGET_I386) */
6297 
6298 /*
6299  * These constants are generic.  Supply any that are missing from the host.
6300  */
6301 #ifndef PR_SET_NAME
6302 # define PR_SET_NAME    15
6303 # define PR_GET_NAME    16
6304 #endif
6305 #ifndef PR_SET_FP_MODE
6306 # define PR_SET_FP_MODE 45
6307 # define PR_GET_FP_MODE 46
6308 # define PR_FP_MODE_FR   (1 << 0)
6309 # define PR_FP_MODE_FRE  (1 << 1)
6310 #endif
6311 #ifndef PR_SVE_SET_VL
6312 # define PR_SVE_SET_VL  50
6313 # define PR_SVE_GET_VL  51
6314 # define PR_SVE_VL_LEN_MASK  0xffff
6315 # define PR_SVE_VL_INHERIT   (1 << 17)
6316 #endif
6317 #ifndef PR_PAC_RESET_KEYS
6318 # define PR_PAC_RESET_KEYS  54
6319 # define PR_PAC_APIAKEY   (1 << 0)
6320 # define PR_PAC_APIBKEY   (1 << 1)
6321 # define PR_PAC_APDAKEY   (1 << 2)
6322 # define PR_PAC_APDBKEY   (1 << 3)
6323 # define PR_PAC_APGAKEY   (1 << 4)
6324 #endif
6325 #ifndef PR_SET_TAGGED_ADDR_CTRL
6326 # define PR_SET_TAGGED_ADDR_CTRL 55
6327 # define PR_GET_TAGGED_ADDR_CTRL 56
6328 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6329 #endif
6330 #ifndef PR_MTE_TCF_SHIFT
6331 # define PR_MTE_TCF_SHIFT       1
6332 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6333 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6334 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6335 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6336 # define PR_MTE_TAG_SHIFT       3
6337 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6338 #endif
6339 #ifndef PR_SET_IO_FLUSHER
6340 # define PR_SET_IO_FLUSHER 57
6341 # define PR_GET_IO_FLUSHER 58
6342 #endif
6343 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6344 # define PR_SET_SYSCALL_USER_DISPATCH 59
6345 #endif
6346 #ifndef PR_SME_SET_VL
6347 # define PR_SME_SET_VL  63
6348 # define PR_SME_GET_VL  64
6349 # define PR_SME_VL_LEN_MASK  0xffff
6350 # define PR_SME_VL_INHERIT   (1 << 17)
6351 #endif
6352 
6353 #include "target_prctl.h"
6354 
6355 static abi_long do_prctl_inval0(CPUArchState *env)
6356 {
6357     return -TARGET_EINVAL;
6358 }
6359 
6360 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6361 {
6362     return -TARGET_EINVAL;
6363 }
6364 
6365 #ifndef do_prctl_get_fp_mode
6366 #define do_prctl_get_fp_mode do_prctl_inval0
6367 #endif
6368 #ifndef do_prctl_set_fp_mode
6369 #define do_prctl_set_fp_mode do_prctl_inval1
6370 #endif
6371 #ifndef do_prctl_sve_get_vl
6372 #define do_prctl_sve_get_vl do_prctl_inval0
6373 #endif
6374 #ifndef do_prctl_sve_set_vl
6375 #define do_prctl_sve_set_vl do_prctl_inval1
6376 #endif
6377 #ifndef do_prctl_reset_keys
6378 #define do_prctl_reset_keys do_prctl_inval1
6379 #endif
6380 #ifndef do_prctl_set_tagged_addr_ctrl
6381 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6382 #endif
6383 #ifndef do_prctl_get_tagged_addr_ctrl
6384 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6385 #endif
6386 #ifndef do_prctl_get_unalign
6387 #define do_prctl_get_unalign do_prctl_inval1
6388 #endif
6389 #ifndef do_prctl_set_unalign
6390 #define do_prctl_set_unalign do_prctl_inval1
6391 #endif
6392 #ifndef do_prctl_sme_get_vl
6393 #define do_prctl_sme_get_vl do_prctl_inval0
6394 #endif
6395 #ifndef do_prctl_sme_set_vl
6396 #define do_prctl_sme_set_vl do_prctl_inval1
6397 #endif
6398 
6399 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6400                          abi_long arg3, abi_long arg4, abi_long arg5)
6401 {
6402     abi_long ret;
6403 
6404     switch (option) {
6405     case PR_GET_PDEATHSIG:
6406         {
6407             int deathsig;
6408             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6409                                   arg3, arg4, arg5));
6410             if (!is_error(ret) &&
6411                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6412                 return -TARGET_EFAULT;
6413             }
6414             return ret;
6415         }
6416     case PR_SET_PDEATHSIG:
6417         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6418                                arg3, arg4, arg5));
6419     case PR_GET_NAME:
6420         {
6421             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6422             if (!name) {
6423                 return -TARGET_EFAULT;
6424             }
6425             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6426                                   arg3, arg4, arg5));
6427             unlock_user(name, arg2, 16);
6428             return ret;
6429         }
6430     case PR_SET_NAME:
6431         {
6432             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6433             if (!name) {
6434                 return -TARGET_EFAULT;
6435             }
6436             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6437                                   arg3, arg4, arg5));
6438             unlock_user(name, arg2, 0);
6439             return ret;
6440         }
6441     case PR_GET_FP_MODE:
6442         return do_prctl_get_fp_mode(env);
6443     case PR_SET_FP_MODE:
6444         return do_prctl_set_fp_mode(env, arg2);
6445     case PR_SVE_GET_VL:
6446         return do_prctl_sve_get_vl(env);
6447     case PR_SVE_SET_VL:
6448         return do_prctl_sve_set_vl(env, arg2);
6449     case PR_SME_GET_VL:
6450         return do_prctl_sme_get_vl(env);
6451     case PR_SME_SET_VL:
6452         return do_prctl_sme_set_vl(env, arg2);
6453     case PR_PAC_RESET_KEYS:
6454         if (arg3 || arg4 || arg5) {
6455             return -TARGET_EINVAL;
6456         }
6457         return do_prctl_reset_keys(env, arg2);
6458     case PR_SET_TAGGED_ADDR_CTRL:
6459         if (arg3 || arg4 || arg5) {
6460             return -TARGET_EINVAL;
6461         }
6462         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6463     case PR_GET_TAGGED_ADDR_CTRL:
6464         if (arg2 || arg3 || arg4 || arg5) {
6465             return -TARGET_EINVAL;
6466         }
6467         return do_prctl_get_tagged_addr_ctrl(env);
6468 
6469     case PR_GET_UNALIGN:
6470         return do_prctl_get_unalign(env, arg2);
6471     case PR_SET_UNALIGN:
6472         return do_prctl_set_unalign(env, arg2);
6473 
6474     case PR_CAP_AMBIENT:
6475     case PR_CAPBSET_READ:
6476     case PR_CAPBSET_DROP:
6477     case PR_GET_DUMPABLE:
6478     case PR_SET_DUMPABLE:
6479     case PR_GET_KEEPCAPS:
6480     case PR_SET_KEEPCAPS:
6481     case PR_GET_SECUREBITS:
6482     case PR_SET_SECUREBITS:
6483     case PR_GET_TIMING:
6484     case PR_SET_TIMING:
6485     case PR_GET_TIMERSLACK:
6486     case PR_SET_TIMERSLACK:
6487     case PR_MCE_KILL:
6488     case PR_MCE_KILL_GET:
6489     case PR_GET_NO_NEW_PRIVS:
6490     case PR_SET_NO_NEW_PRIVS:
6491     case PR_GET_IO_FLUSHER:
6492     case PR_SET_IO_FLUSHER:
6493         /* These prctl options take no pointer arguments; pass them on. */
6494         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6495 
6496     case PR_GET_CHILD_SUBREAPER:
6497     case PR_SET_CHILD_SUBREAPER:
6498     case PR_GET_SPECULATION_CTRL:
6499     case PR_SET_SPECULATION_CTRL:
6500     case PR_GET_TID_ADDRESS:
6501         /* TODO */
6502         return -TARGET_EINVAL;
6503 
6504     case PR_GET_FPEXC:
6505     case PR_SET_FPEXC:
6506         /* Was used for SPE on PowerPC. */
6507         return -TARGET_EINVAL;
6508 
6509     case PR_GET_ENDIAN:
6510     case PR_SET_ENDIAN:
6511     case PR_GET_FPEMU:
6512     case PR_SET_FPEMU:
6513     case PR_SET_MM:
6514     case PR_GET_SECCOMP:
6515     case PR_SET_SECCOMP:
6516     case PR_SET_SYSCALL_USER_DISPATCH:
6517     case PR_GET_THP_DISABLE:
6518     case PR_SET_THP_DISABLE:
6519     case PR_GET_TSC:
6520     case PR_SET_TSC:
6521         /* Disabled to prevent the target from disabling features we need. */
6522         return -TARGET_EINVAL;
6523 
6524     default:
6525         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6526                       option);
6527         return -TARGET_EINVAL;
6528     }
6529 }
6530 
6531 #define NEW_STACK_SIZE 0x40000
6532 
6533 
6534 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6535 typedef struct {
6536     CPUArchState *env;
6537     pthread_mutex_t mutex;
6538     pthread_cond_t cond;
6539     pthread_t thread;
6540     uint32_t tid;
6541     abi_ulong child_tidptr;
6542     abi_ulong parent_tidptr;
6543     sigset_t sigmask;
6544 } new_thread_info;
6545 
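/*
 * Start routine for guest threads created with clone(CLONE_VM): register
 * the host thread with RCU and TCG, publish the TID, signal the parent via
 * info->cond, then wait on clone_lock until the parent has finished setting
 * up the TLS state before entering the CPU loop.
 */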
6546 static void *clone_func(void *arg)
6547 {
6548     new_thread_info *info = arg;
6549     CPUArchState *env;
6550     CPUState *cpu;
6551     TaskState *ts;
6552 
6553     rcu_register_thread();
6554     tcg_register_thread();
6555     env = info->env;
6556     cpu = env_cpu(env);
6557     thread_cpu = cpu;
6558     ts = (TaskState *)cpu->opaque;
6559     info->tid = sys_gettid();
6560     task_settid(ts);
6561     if (info->child_tidptr)
6562         put_user_u32(info->tid, info->child_tidptr);
6563     if (info->parent_tidptr)
6564         put_user_u32(info->tid, info->parent_tidptr);
6565     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6566     /* Enable signals.  */
6567     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6568     /* Signal to the parent that we're ready.  */
6569     pthread_mutex_lock(&info->mutex);
6570     pthread_cond_broadcast(&info->cond);
6571     pthread_mutex_unlock(&info->mutex);
6572     /* Wait until the parent has finished initializing the tls state.  */
6573     pthread_mutex_lock(&clone_lock);
6574     pthread_mutex_unlock(&clone_lock);
6575     cpu_loop(env);
6576     /* never exits */
6577     return NULL;
6578 }
6579 
6580 /* do_fork() must return host values and target errnos (unlike most
6581    do_*() functions). */
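/*
 * Requests with CLONE_VM become host pthreads sharing this process (guest
 * threads); everything else, including emulated vfork(), is handled with a
 * plain host fork().
 */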
6582 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6583                    abi_ulong parent_tidptr, target_ulong newtls,
6584                    abi_ulong child_tidptr)
6585 {
6586     CPUState *cpu = env_cpu(env);
6587     int ret;
6588     TaskState *ts;
6589     CPUState *new_cpu;
6590     CPUArchState *new_env;
6591     sigset_t sigmask;
6592 
6593     flags &= ~CLONE_IGNORED_FLAGS;
6594 
6595     /* Emulate vfork() with fork() */
6596     if (flags & CLONE_VFORK)
6597         flags &= ~(CLONE_VFORK | CLONE_VM);
6598 
6599     if (flags & CLONE_VM) {
6600         TaskState *parent_ts = (TaskState *)cpu->opaque;
6601         new_thread_info info;
6602         pthread_attr_t attr;
6603 
6604         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6605             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6606             return -TARGET_EINVAL;
6607         }
6608 
6609         ts = g_new0(TaskState, 1);
6610         init_task_state(ts);
6611 
6612         /* Grab a mutex so that thread setup appears atomic.  */
6613         pthread_mutex_lock(&clone_lock);
6614 
6615         /*
6616          * If this is our first additional thread, we need to ensure we
6617          * generate code for parallel execution and flush old translations.
6618          * Do this now so that the copy gets CF_PARALLEL too.
6619          */
6620         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6621             cpu->tcg_cflags |= CF_PARALLEL;
6622             tb_flush(cpu);
6623         }
6624 
6625         /* we create a new CPU instance. */
6626         new_env = cpu_copy(env);
6627         /* Init regs that differ from the parent.  */
6628         cpu_clone_regs_child(new_env, newsp, flags);
6629         cpu_clone_regs_parent(env, flags);
6630         new_cpu = env_cpu(new_env);
6631         new_cpu->opaque = ts;
6632         ts->bprm = parent_ts->bprm;
6633         ts->info = parent_ts->info;
6634         ts->signal_mask = parent_ts->signal_mask;
6635 
6636         if (flags & CLONE_CHILD_CLEARTID) {
6637             ts->child_tidptr = child_tidptr;
6638         }
6639 
6640         if (flags & CLONE_SETTLS) {
6641             cpu_set_tls (new_env, newtls);
6642         }
6643 
6644         memset(&info, 0, sizeof(info));
6645         pthread_mutex_init(&info.mutex, NULL);
6646         pthread_mutex_lock(&info.mutex);
6647         pthread_cond_init(&info.cond, NULL);
6648         info.env = new_env;
6649         if (flags & CLONE_CHILD_SETTID) {
6650             info.child_tidptr = child_tidptr;
6651         }
6652         if (flags & CLONE_PARENT_SETTID) {
6653             info.parent_tidptr = parent_tidptr;
6654         }
6655 
6656         ret = pthread_attr_init(&attr);
6657         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6658         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6659         /* It is not safe to deliver signals until the child has finished
6660            initializing, so temporarily block all signals.  */
6661         sigfillset(&sigmask);
6662         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6663         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6664 
6665         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6666         /* TODO: Free new CPU state if thread creation failed.  */
6667 
6668         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6669         pthread_attr_destroy(&attr);
6670         if (ret == 0) {
6671             /* Wait for the child to initialize.  */
6672             pthread_cond_wait(&info.cond, &info.mutex);
6673             ret = info.tid;
6674         } else {
6675             ret = -1;
6676         }
6677         pthread_mutex_unlock(&info.mutex);
6678         pthread_cond_destroy(&info.cond);
6679         pthread_mutex_destroy(&info.mutex);
6680         pthread_mutex_unlock(&clone_lock);
6681     } else {
6682         /* Without CLONE_VM, we treat this as a fork. */
6683         if (flags & CLONE_INVALID_FORK_FLAGS) {
6684             return -TARGET_EINVAL;
6685         }
6686 
6687         /* We can't support custom termination signals */
6688         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6689             return -TARGET_EINVAL;
6690         }
6691 
6692         if (block_signals()) {
6693             return -QEMU_ERESTARTSYS;
6694         }
6695 
6696         fork_start();
6697         ret = fork();
6698         if (ret == 0) {
6699             /* Child Process.  */
6700             cpu_clone_regs_child(env, newsp, flags);
6701             fork_end(1);
6702             /* There is a race condition here.  The parent process could
6703                theoretically read the TID in the child process before the
6704                child tid is set.  This would require either using ptrace
6705                (not implemented) or having *_tidptr point at a shared memory
6706                mapping.  We can't repeat the spinlock hack used above because
6707                the child process gets its own copy of the lock.  */
6708             if (flags & CLONE_CHILD_SETTID)
6709                 put_user_u32(sys_gettid(), child_tidptr);
6710             if (flags & CLONE_PARENT_SETTID)
6711                 put_user_u32(sys_gettid(), parent_tidptr);
6712             ts = (TaskState *)cpu->opaque;
6713             if (flags & CLONE_SETTLS)
6714                 cpu_set_tls (env, newtls);
6715             if (flags & CLONE_CHILD_CLEARTID)
6716                 ts->child_tidptr = child_tidptr;
6717         } else {
6718             cpu_clone_regs_parent(env, flags);
6719             fork_end(0);
6720         }
6721     }
6722     return ret;
6723 }
6724 
6725 /* Warning: does not handle Linux-specific flags... */
6726 static int target_to_host_fcntl_cmd(int cmd)
6727 {
6728     int ret;
6729 
6730     switch(cmd) {
6731     case TARGET_F_DUPFD:
6732     case TARGET_F_GETFD:
6733     case TARGET_F_SETFD:
6734     case TARGET_F_GETFL:
6735     case TARGET_F_SETFL:
6736     case TARGET_F_OFD_GETLK:
6737     case TARGET_F_OFD_SETLK:
6738     case TARGET_F_OFD_SETLKW:
6739         ret = cmd;
6740         break;
6741     case TARGET_F_GETLK:
6742         ret = F_GETLK64;
6743         break;
6744     case TARGET_F_SETLK:
6745         ret = F_SETLK64;
6746         break;
6747     case TARGET_F_SETLKW:
6748         ret = F_SETLKW64;
6749         break;
6750     case TARGET_F_GETOWN:
6751         ret = F_GETOWN;
6752         break;
6753     case TARGET_F_SETOWN:
6754         ret = F_SETOWN;
6755         break;
6756     case TARGET_F_GETSIG:
6757         ret = F_GETSIG;
6758         break;
6759     case TARGET_F_SETSIG:
6760         ret = F_SETSIG;
6761         break;
6762 #if TARGET_ABI_BITS == 32
6763     case TARGET_F_GETLK64:
6764         ret = F_GETLK64;
6765         break;
6766     case TARGET_F_SETLK64:
6767         ret = F_SETLK64;
6768         break;
6769     case TARGET_F_SETLKW64:
6770         ret = F_SETLKW64;
6771         break;
6772 #endif
6773     case TARGET_F_SETLEASE:
6774         ret = F_SETLEASE;
6775         break;
6776     case TARGET_F_GETLEASE:
6777         ret = F_GETLEASE;
6778         break;
6779 #ifdef F_DUPFD_CLOEXEC
6780     case TARGET_F_DUPFD_CLOEXEC:
6781         ret = F_DUPFD_CLOEXEC;
6782         break;
6783 #endif
6784     case TARGET_F_NOTIFY:
6785         ret = F_NOTIFY;
6786         break;
6787 #ifdef F_GETOWN_EX
6788     case TARGET_F_GETOWN_EX:
6789         ret = F_GETOWN_EX;
6790         break;
6791 #endif
6792 #ifdef F_SETOWN_EX
6793     case TARGET_F_SETOWN_EX:
6794         ret = F_SETOWN_EX;
6795         break;
6796 #endif
6797 #ifdef F_SETPIPE_SZ
6798     case TARGET_F_SETPIPE_SZ:
6799         ret = F_SETPIPE_SZ;
6800         break;
6801     case TARGET_F_GETPIPE_SZ:
6802         ret = F_GETPIPE_SZ;
6803         break;
6804 #endif
6805 #ifdef F_ADD_SEALS
6806     case TARGET_F_ADD_SEALS:
6807         ret = F_ADD_SEALS;
6808         break;
6809     case TARGET_F_GET_SEALS:
6810         ret = F_GET_SEALS;
6811         break;
6812 #endif
6813     default:
6814         ret = -TARGET_EINVAL;
6815         break;
6816     }
6817 
6818 #if defined(__powerpc64__)
6819     /* On PPC64, the glibc headers have F_*LK* defined as 12, 13 and 14,
6820      * which are not supported by the kernel. The glibc fcntl call actually
6821      * adjusts them to 5, 6 and 7 before making the syscall(). Since we make
6822      * the syscall directly, adjust to what is supported by the kernel.
6823      */
6824     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6825         ret -= F_GETLK64 - 5;
6826     }
6827 #endif
6828 
6829     return ret;
6830 }
6831 
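/*
 * FLOCK_TRANSTBL expands to a switch over 'type'; TRANSTBL_CONVERT is
 * redefined before each expansion to translate lock types in the
 * appropriate direction.
 */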
6832 #define FLOCK_TRANSTBL \
6833     switch (type) { \
6834     TRANSTBL_CONVERT(F_RDLCK); \
6835     TRANSTBL_CONVERT(F_WRLCK); \
6836     TRANSTBL_CONVERT(F_UNLCK); \
6837     }
6838 
6839 static int target_to_host_flock(int type)
6840 {
6841 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6842     FLOCK_TRANSTBL
6843 #undef  TRANSTBL_CONVERT
6844     return -TARGET_EINVAL;
6845 }
6846 
6847 static int host_to_target_flock(int type)
6848 {
6849 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6850     FLOCK_TRANSTBL
6851 #undef  TRANSTBL_CONVERT
6852     /* If we don't know how to convert the value coming
6853      * from the host, we copy it to the target field as-is.
6854      */
6855     return type;
6856 }
6857 
6858 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6859                                             abi_ulong target_flock_addr)
6860 {
6861     struct target_flock *target_fl;
6862     int l_type;
6863 
6864     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6865         return -TARGET_EFAULT;
6866     }
6867 
6868     __get_user(l_type, &target_fl->l_type);
6869     l_type = target_to_host_flock(l_type);
6870     if (l_type < 0) {
6871         return l_type;
6872     }
6873     fl->l_type = l_type;
6874     __get_user(fl->l_whence, &target_fl->l_whence);
6875     __get_user(fl->l_start, &target_fl->l_start);
6876     __get_user(fl->l_len, &target_fl->l_len);
6877     __get_user(fl->l_pid, &target_fl->l_pid);
6878     unlock_user_struct(target_fl, target_flock_addr, 0);
6879     return 0;
6880 }
6881 
6882 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6883                                           const struct flock64 *fl)
6884 {
6885     struct target_flock *target_fl;
6886     short l_type;
6887 
6888     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6889         return -TARGET_EFAULT;
6890     }
6891 
6892     l_type = host_to_target_flock(fl->l_type);
6893     __put_user(l_type, &target_fl->l_type);
6894     __put_user(fl->l_whence, &target_fl->l_whence);
6895     __put_user(fl->l_start, &target_fl->l_start);
6896     __put_user(fl->l_len, &target_fl->l_len);
6897     __put_user(fl->l_pid, &target_fl->l_pid);
6898     unlock_user_struct(target_fl, target_flock_addr, 1);
6899     return 0;
6900 }
6901 
6902 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6903 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6904 
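/*
 * Old-ABI ARM aligns 64-bit quantities to only 4 bytes, so its struct
 * flock64 lacks the padding that EABI inserts before l_start; hence the
 * packed layout and dedicated copy helpers below.
 */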
6905 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6906 struct target_oabi_flock64 {
6907     abi_short l_type;
6908     abi_short l_whence;
6909     abi_llong l_start;
6910     abi_llong l_len;
6911     abi_int   l_pid;
6912 } QEMU_PACKED;
6913 
6914 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6915                                                    abi_ulong target_flock_addr)
6916 {
6917     struct target_oabi_flock64 *target_fl;
6918     int l_type;
6919 
6920     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6921         return -TARGET_EFAULT;
6922     }
6923 
6924     __get_user(l_type, &target_fl->l_type);
6925     l_type = target_to_host_flock(l_type);
6926     if (l_type < 0) {
6927         return l_type;
6928     }
6929     fl->l_type = l_type;
6930     __get_user(fl->l_whence, &target_fl->l_whence);
6931     __get_user(fl->l_start, &target_fl->l_start);
6932     __get_user(fl->l_len, &target_fl->l_len);
6933     __get_user(fl->l_pid, &target_fl->l_pid);
6934     unlock_user_struct(target_fl, target_flock_addr, 0);
6935     return 0;
6936 }
6937 
6938 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6939                                                  const struct flock64 *fl)
6940 {
6941     struct target_oabi_flock64 *target_fl;
6942     short l_type;
6943 
6944     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6945         return -TARGET_EFAULT;
6946     }
6947 
6948     l_type = host_to_target_flock(fl->l_type);
6949     __put_user(l_type, &target_fl->l_type);
6950     __put_user(fl->l_whence, &target_fl->l_whence);
6951     __put_user(fl->l_start, &target_fl->l_start);
6952     __put_user(fl->l_len, &target_fl->l_len);
6953     __put_user(fl->l_pid, &target_fl->l_pid);
6954     unlock_user_struct(target_fl, target_flock_addr, 1);
6955     return 0;
6956 }
6957 #endif
6958 
6959 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6960                                               abi_ulong target_flock_addr)
6961 {
6962     struct target_flock64 *target_fl;
6963     int l_type;
6964 
6965     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6966         return -TARGET_EFAULT;
6967     }
6968 
6969     __get_user(l_type, &target_fl->l_type);
6970     l_type = target_to_host_flock(l_type);
6971     if (l_type < 0) {
6972         return l_type;
6973     }
6974     fl->l_type = l_type;
6975     __get_user(fl->l_whence, &target_fl->l_whence);
6976     __get_user(fl->l_start, &target_fl->l_start);
6977     __get_user(fl->l_len, &target_fl->l_len);
6978     __get_user(fl->l_pid, &target_fl->l_pid);
6979     unlock_user_struct(target_fl, target_flock_addr, 0);
6980     return 0;
6981 }
6982 
6983 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6984                                             const struct flock64 *fl)
6985 {
6986     struct target_flock64 *target_fl;
6987     short l_type;
6988 
6989     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6990         return -TARGET_EFAULT;
6991     }
6992 
6993     l_type = host_to_target_flock(fl->l_type);
6994     __put_user(l_type, &target_fl->l_type);
6995     __put_user(fl->l_whence, &target_fl->l_whence);
6996     __put_user(fl->l_start, &target_fl->l_start);
6997     __put_user(fl->l_len, &target_fl->l_len);
6998     __put_user(fl->l_pid, &target_fl->l_pid);
6999     unlock_user_struct(target_fl, target_flock_addr, 1);
7000     return 0;
7001 }
7002 
7003 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7004 {
7005     struct flock64 fl64;
7006 #ifdef F_GETOWN_EX
7007     struct f_owner_ex fox;
7008     struct target_f_owner_ex *target_fox;
7009 #endif
7010     abi_long ret;
7011     int host_cmd = target_to_host_fcntl_cmd(cmd);
7012 
7013     if (host_cmd == -TARGET_EINVAL)
7014         return host_cmd;
7015 
7016     switch(cmd) {
7017     case TARGET_F_GETLK:
7018         ret = copy_from_user_flock(&fl64, arg);
7019         if (ret) {
7020             return ret;
7021         }
7022         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7023         if (ret == 0) {
7024             ret = copy_to_user_flock(arg, &fl64);
7025         }
7026         break;
7027 
7028     case TARGET_F_SETLK:
7029     case TARGET_F_SETLKW:
7030         ret = copy_from_user_flock(&fl64, arg);
7031         if (ret) {
7032             return ret;
7033         }
7034         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7035         break;
7036 
7037     case TARGET_F_GETLK64:
7038     case TARGET_F_OFD_GETLK:
7039         ret = copy_from_user_flock64(&fl64, arg);
7040         if (ret) {
7041             return ret;
7042         }
7043         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7044         if (ret == 0) {
7045             ret = copy_to_user_flock64(arg, &fl64);
7046         }
7047         break;
7048     case TARGET_F_SETLK64:
7049     case TARGET_F_SETLKW64:
7050     case TARGET_F_OFD_SETLK:
7051     case TARGET_F_OFD_SETLKW:
7052         ret = copy_from_user_flock64(&fl64, arg);
7053         if (ret) {
7054             return ret;
7055         }
7056         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7057         break;
7058 
7059     case TARGET_F_GETFL:
7060         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7061         if (ret >= 0) {
7062             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7063         }
7064         break;
7065 
7066     case TARGET_F_SETFL:
7067         ret = get_errno(safe_fcntl(fd, host_cmd,
7068                                    target_to_host_bitmask(arg,
7069                                                           fcntl_flags_tbl)));
7070         break;
7071 
7072 #ifdef F_GETOWN_EX
7073     case TARGET_F_GETOWN_EX:
7074         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7075         if (ret >= 0) {
7076             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7077                 return -TARGET_EFAULT;
7078             target_fox->type = tswap32(fox.type);
7079             target_fox->pid = tswap32(fox.pid);
7080             unlock_user_struct(target_fox, arg, 1);
7081         }
7082         break;
7083 #endif
7084 
7085 #ifdef F_SETOWN_EX
7086     case TARGET_F_SETOWN_EX:
7087         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7088             return -TARGET_EFAULT;
7089         fox.type = tswap32(target_fox->type);
7090         fox.pid = tswap32(target_fox->pid);
7091         unlock_user_struct(target_fox, arg, 0);
7092         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7093         break;
7094 #endif
7095 
7096     case TARGET_F_SETSIG:
7097         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7098         break;
7099 
7100     case TARGET_F_GETSIG:
7101         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7102         break;
7103 
7104     case TARGET_F_SETOWN:
7105     case TARGET_F_GETOWN:
7106     case TARGET_F_SETLEASE:
7107     case TARGET_F_GETLEASE:
7108     case TARGET_F_SETPIPE_SZ:
7109     case TARGET_F_GETPIPE_SZ:
7110     case TARGET_F_ADD_SEALS:
7111     case TARGET_F_GET_SEALS:
7112         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7113         break;
7114 
7115     default:
7116         ret = get_errno(safe_fcntl(fd, cmd, arg));
7117         break;
7118     }
7119     return ret;
7120 }
7121 
7122 #ifdef USE_UID16
7123 
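/*
 * Targets with 16-bit uid_t/gid_t: IDs that do not fit are clamped to the
 * kernel's overflow ID (65534), and the 16-bit value -1 must keep its
 * "leave unchanged" meaning when widened.
 */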
7124 static inline int high2lowuid(int uid)
7125 {
7126     if (uid > 65535)
7127         return 65534;
7128     else
7129         return uid;
7130 }
7131 
7132 static inline int high2lowgid(int gid)
7133 {
7134     if (gid > 65535)
7135         return 65534;
7136     else
7137         return gid;
7138 }
7139 
7140 static inline int low2highuid(int uid)
7141 {
7142     if ((int16_t)uid == -1)
7143         return -1;
7144     else
7145         return uid;
7146 }
7147 
7148 static inline int low2highgid(int gid)
7149 {
7150     if ((int16_t)gid == -1)
7151         return -1;
7152     else
7153         return gid;
7154 }
7155 static inline int tswapid(int id)
7156 {
7157     return tswap16(id);
7158 }
7159 
7160 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7161 
7162 #else /* !USE_UID16 */
7163 static inline int high2lowuid(int uid)
7164 {
7165     return uid;
7166 }
7167 static inline int high2lowgid(int gid)
7168 {
7169     return gid;
7170 }
7171 static inline int low2highuid(int uid)
7172 {
7173     return uid;
7174 }
7175 static inline int low2highgid(int gid)
7176 {
7177     return gid;
7178 }
7179 static inline int tswapid(int id)
7180 {
7181     return tswap32(id);
7182 }
7183 
7184 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7185 
7186 #endif /* USE_UID16 */
7187 
7188 /* We must do direct syscalls for setting UID/GID, because we want to
7189  * implement the Linux system call semantics of "change only for this thread",
7190  * not the libc/POSIX semantics of "change for all threads in process".
7191  * (See http://ewontfix.com/17/ for more details.)
7192  * We use the 32-bit version of the syscalls if present; if it is not
7193  * then either the host architecture supports 32-bit UIDs natively with
7194  * the standard syscall, or the 16-bit UID is the best we can do.
7195  */
7196 #ifdef __NR_setuid32
7197 #define __NR_sys_setuid __NR_setuid32
7198 #else
7199 #define __NR_sys_setuid __NR_setuid
7200 #endif
7201 #ifdef __NR_setgid32
7202 #define __NR_sys_setgid __NR_setgid32
7203 #else
7204 #define __NR_sys_setgid __NR_setgid
7205 #endif
7206 #ifdef __NR_setresuid32
7207 #define __NR_sys_setresuid __NR_setresuid32
7208 #else
7209 #define __NR_sys_setresuid __NR_setresuid
7210 #endif
7211 #ifdef __NR_setresgid32
7212 #define __NR_sys_setresgid __NR_setresgid32
7213 #else
7214 #define __NR_sys_setresgid __NR_setresgid
7215 #endif
7216 
7217 _syscall1(int, sys_setuid, uid_t, uid)
7218 _syscall1(int, sys_setgid, gid_t, gid)
7219 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7220 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7221 
7222 void syscall_init(void)
7223 {
7224     IOCTLEntry *ie;
7225     const argtype *arg_type;
7226     int size;
7227 
7228     thunk_init(STRUCT_MAX);
7229 
7230 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7231 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7232 #include "syscall_types.h"
7233 #undef STRUCT
7234 #undef STRUCT_SPECIAL
7235 
7236     /* We patch the ioctl size if necessary. We rely on the fact that
7237        no ioctl has all bits set to '1' in the size field. */
7238     ie = ioctl_entries;
7239     while (ie->target_cmd != 0) {
7240         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7241             TARGET_IOC_SIZEMASK) {
7242             arg_type = ie->arg_type;
7243             if (arg_type[0] != TYPE_PTR) {
7244                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7245                         ie->target_cmd);
7246                 exit(1);
7247             }
7248             arg_type++;
7249             size = thunk_type_size(arg_type, 0);
7250             ie->target_cmd = (ie->target_cmd &
7251                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7252                 (size << TARGET_IOC_SIZESHIFT);
7253         }
7254 
7255         /* automatic consistency check if same arch */
7256 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7257     (defined(__x86_64__) && defined(TARGET_X86_64))
7258         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7259             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7260                     ie->name, ie->target_cmd, ie->host_cmd);
7261         }
7262 #endif
7263         ie++;
7264     }
7265 }
7266 
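/*
 * Some 32-bit ABIs pass 64-bit syscall arguments in aligned register pairs,
 * inserting a padding slot; when regpairs_aligned() reports this, the low
 * and high halves of the offset arrive one argument later.
 */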
7267 #ifdef TARGET_NR_truncate64
7268 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7269                                          abi_long arg2,
7270                                          abi_long arg3,
7271                                          abi_long arg4)
7272 {
7273     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7274         arg2 = arg3;
7275         arg3 = arg4;
7276     }
7277     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7278 }
7279 #endif
7280 
7281 #ifdef TARGET_NR_ftruncate64
7282 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7283                                           abi_long arg2,
7284                                           abi_long arg3,
7285                                           abi_long arg4)
7286 {
7287     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7288         arg2 = arg3;
7289         arg3 = arg4;
7290     }
7291     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7292 }
7293 #endif
7294 
7295 #if defined(TARGET_NR_timer_settime) || \
7296     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7297 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7298                                                  abi_ulong target_addr)
7299 {
7300     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7301                                 offsetof(struct target_itimerspec,
7302                                          it_interval)) ||
7303         target_to_host_timespec(&host_its->it_value, target_addr +
7304                                 offsetof(struct target_itimerspec,
7305                                          it_value))) {
7306         return -TARGET_EFAULT;
7307     }
7308 
7309     return 0;
7310 }
7311 #endif
7312 
7313 #if defined(TARGET_NR_timer_settime64) || \
7314     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7315 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7316                                                    abi_ulong target_addr)
7317 {
7318     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7319                                   offsetof(struct target__kernel_itimerspec,
7320                                            it_interval)) ||
7321         target_to_host_timespec64(&host_its->it_value, target_addr +
7322                                   offsetof(struct target__kernel_itimerspec,
7323                                            it_value))) {
7324         return -TARGET_EFAULT;
7325     }
7326 
7327     return 0;
7328 }
7329 #endif
7330 
7331 #if ((defined(TARGET_NR_timerfd_gettime) || \
7332       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7333       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7334 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7335                                                  struct itimerspec *host_its)
7336 {
7337     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7338                                                        it_interval),
7339                                 &host_its->it_interval) ||
7340         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7341                                                        it_value),
7342                                 &host_its->it_value)) {
7343         return -TARGET_EFAULT;
7344     }
7345     return 0;
7346 }
7347 #endif
7348 
7349 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7350       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7351       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7352 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7353                                                    struct itimerspec *host_its)
7354 {
7355     if (host_to_target_timespec64(target_addr +
7356                                   offsetof(struct target__kernel_itimerspec,
7357                                            it_interval),
7358                                   &host_its->it_interval) ||
7359         host_to_target_timespec64(target_addr +
7360                                   offsetof(struct target__kernel_itimerspec,
7361                                            it_value),
7362                                   &host_its->it_value)) {
7363         return -TARGET_EFAULT;
7364     }
7365     return 0;
7366 }
7367 #endif
7368 
7369 #if defined(TARGET_NR_adjtimex) || \
7370     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7371 static inline abi_long target_to_host_timex(struct timex *host_tx,
7372                                             abi_long target_addr)
7373 {
7374     struct target_timex *target_tx;
7375 
7376     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7377         return -TARGET_EFAULT;
7378     }
7379 
7380     __get_user(host_tx->modes, &target_tx->modes);
7381     __get_user(host_tx->offset, &target_tx->offset);
7382     __get_user(host_tx->freq, &target_tx->freq);
7383     __get_user(host_tx->maxerror, &target_tx->maxerror);
7384     __get_user(host_tx->esterror, &target_tx->esterror);
7385     __get_user(host_tx->status, &target_tx->status);
7386     __get_user(host_tx->constant, &target_tx->constant);
7387     __get_user(host_tx->precision, &target_tx->precision);
7388     __get_user(host_tx->tolerance, &target_tx->tolerance);
7389     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7390     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7391     __get_user(host_tx->tick, &target_tx->tick);
7392     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7393     __get_user(host_tx->jitter, &target_tx->jitter);
7394     __get_user(host_tx->shift, &target_tx->shift);
7395     __get_user(host_tx->stabil, &target_tx->stabil);
7396     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7397     __get_user(host_tx->calcnt, &target_tx->calcnt);
7398     __get_user(host_tx->errcnt, &target_tx->errcnt);
7399     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7400     __get_user(host_tx->tai, &target_tx->tai);
7401 
7402     unlock_user_struct(target_tx, target_addr, 0);
7403     return 0;
7404 }
7405 
7406 static inline abi_long host_to_target_timex(abi_long target_addr,
7407                                             struct timex *host_tx)
7408 {
7409     struct target_timex *target_tx;
7410 
7411     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7412         return -TARGET_EFAULT;
7413     }
7414 
7415     __put_user(host_tx->modes, &target_tx->modes);
7416     __put_user(host_tx->offset, &target_tx->offset);
7417     __put_user(host_tx->freq, &target_tx->freq);
7418     __put_user(host_tx->maxerror, &target_tx->maxerror);
7419     __put_user(host_tx->esterror, &target_tx->esterror);
7420     __put_user(host_tx->status, &target_tx->status);
7421     __put_user(host_tx->constant, &target_tx->constant);
7422     __put_user(host_tx->precision, &target_tx->precision);
7423     __put_user(host_tx->tolerance, &target_tx->tolerance);
7424     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7425     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7426     __put_user(host_tx->tick, &target_tx->tick);
7427     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7428     __put_user(host_tx->jitter, &target_tx->jitter);
7429     __put_user(host_tx->shift, &target_tx->shift);
7430     __put_user(host_tx->stabil, &target_tx->stabil);
7431     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7432     __put_user(host_tx->calcnt, &target_tx->calcnt);
7433     __put_user(host_tx->errcnt, &target_tx->errcnt);
7434     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7435     __put_user(host_tx->tai, &target_tx->tai);
7436 
7437     unlock_user_struct(target_tx, target_addr, 1);
7438     return 0;
7439 }
7440 #endif
7441 
7442 
7443 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7444 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7445                                               abi_long target_addr)
7446 {
7447     struct target__kernel_timex *target_tx;
7448 
7449     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7450                                  offsetof(struct target__kernel_timex,
7451                                           time))) {
7452         return -TARGET_EFAULT;
7453     }
7454 
7455     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7456         return -TARGET_EFAULT;
7457     }
7458 
7459     __get_user(host_tx->modes, &target_tx->modes);
7460     __get_user(host_tx->offset, &target_tx->offset);
7461     __get_user(host_tx->freq, &target_tx->freq);
7462     __get_user(host_tx->maxerror, &target_tx->maxerror);
7463     __get_user(host_tx->esterror, &target_tx->esterror);
7464     __get_user(host_tx->status, &target_tx->status);
7465     __get_user(host_tx->constant, &target_tx->constant);
7466     __get_user(host_tx->precision, &target_tx->precision);
7467     __get_user(host_tx->tolerance, &target_tx->tolerance);
7468     __get_user(host_tx->tick, &target_tx->tick);
7469     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7470     __get_user(host_tx->jitter, &target_tx->jitter);
7471     __get_user(host_tx->shift, &target_tx->shift);
7472     __get_user(host_tx->stabil, &target_tx->stabil);
7473     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7474     __get_user(host_tx->calcnt, &target_tx->calcnt);
7475     __get_user(host_tx->errcnt, &target_tx->errcnt);
7476     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7477     __get_user(host_tx->tai, &target_tx->tai);
7478 
7479     unlock_user_struct(target_tx, target_addr, 0);
7480     return 0;
7481 }
7482 
7483 static inline abi_long host_to_target_timex64(abi_long target_addr,
7484                                               struct timex *host_tx)
7485 {
7486     struct target__kernel_timex *target_tx;
7487 
7488     if (copy_to_user_timeval64(target_addr +
7489                                 offsetof(struct target__kernel_timex, time),
7490                                 &host_tx->time)) {
7491         return -TARGET_EFAULT;
7492     }
7493 
7494     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7495         return -TARGET_EFAULT;
7496     }
7497 
7498     __put_user(host_tx->modes, &target_tx->modes);
7499     __put_user(host_tx->offset, &target_tx->offset);
7500     __put_user(host_tx->freq, &target_tx->freq);
7501     __put_user(host_tx->maxerror, &target_tx->maxerror);
7502     __put_user(host_tx->esterror, &target_tx->esterror);
7503     __put_user(host_tx->status, &target_tx->status);
7504     __put_user(host_tx->constant, &target_tx->constant);
7505     __put_user(host_tx->precision, &target_tx->precision);
7506     __put_user(host_tx->tolerance, &target_tx->tolerance);
7507     __put_user(host_tx->tick, &target_tx->tick);
7508     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7509     __put_user(host_tx->jitter, &target_tx->jitter);
7510     __put_user(host_tx->shift, &target_tx->shift);
7511     __put_user(host_tx->stabil, &target_tx->stabil);
7512     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7513     __put_user(host_tx->calcnt, &target_tx->calcnt);
7514     __put_user(host_tx->errcnt, &target_tx->errcnt);
7515     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7516     __put_user(host_tx->tai, &target_tx->tai);
7517 
7518     unlock_user_struct(target_tx, target_addr, 1);
7519     return 0;
7520 }
7521 #endif
7522 
7523 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7524 #define sigev_notify_thread_id _sigev_un._tid
7525 #endif
7526 
7527 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7528                                                abi_ulong target_addr)
7529 {
7530     struct target_sigevent *target_sevp;
7531 
7532     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7533         return -TARGET_EFAULT;
7534     }
7535 
7536     /* This union is awkward on 64 bit systems because it has a 32 bit
7537      * integer and a pointer in it; we follow the conversion approach
7538      * used for handling sigval types in signal.c so the guest should get
7539      * the correct value back even if we did a 64 bit byteswap and it's
7540      * using the 32 bit integer.
7541      */
7542     host_sevp->sigev_value.sival_ptr =
7543         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7544     host_sevp->sigev_signo =
7545         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7546     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7547     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7548 
7549     unlock_user_struct(target_sevp, target_addr, 1);
7550     return 0;
7551 }
7552 
7553 #if defined(TARGET_NR_mlockall)
7554 static inline int target_to_host_mlockall_arg(int arg)
7555 {
7556     int result = 0;
7557 
7558     if (arg & TARGET_MCL_CURRENT) {
7559         result |= MCL_CURRENT;
7560     }
7561     if (arg & TARGET_MCL_FUTURE) {
7562         result |= MCL_FUTURE;
7563     }
7564 #ifdef MCL_ONFAULT
7565     if (arg & TARGET_MCL_ONFAULT) {
7566         result |= MCL_ONFAULT;
7567     }
7568 #endif
7569 
7570     return result;
7571 }
7572 #endif
7573 
7574 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7575      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7576      defined(TARGET_NR_newfstatat))
7577 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7578                                              abi_ulong target_addr,
7579                                              struct stat *host_st)
7580 {
7581 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7582     if (cpu_env->eabi) {
7583         struct target_eabi_stat64 *target_st;
7584 
7585         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7586             return -TARGET_EFAULT;
7587         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7588         __put_user(host_st->st_dev, &target_st->st_dev);
7589         __put_user(host_st->st_ino, &target_st->st_ino);
7590 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7591         __put_user(host_st->st_ino, &target_st->__st_ino);
7592 #endif
7593         __put_user(host_st->st_mode, &target_st->st_mode);
7594         __put_user(host_st->st_nlink, &target_st->st_nlink);
7595         __put_user(host_st->st_uid, &target_st->st_uid);
7596         __put_user(host_st->st_gid, &target_st->st_gid);
7597         __put_user(host_st->st_rdev, &target_st->st_rdev);
7598         __put_user(host_st->st_size, &target_st->st_size);
7599         __put_user(host_st->st_blksize, &target_st->st_blksize);
7600         __put_user(host_st->st_blocks, &target_st->st_blocks);
7601         __put_user(host_st->st_atime, &target_st->target_st_atime);
7602         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7603         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7604 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7605         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7606         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7607         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7608 #endif
7609         unlock_user_struct(target_st, target_addr, 1);
7610     } else
7611 #endif
7612     {
7613 #if defined(TARGET_HAS_STRUCT_STAT64)
7614         struct target_stat64 *target_st;
7615 #else
7616         struct target_stat *target_st;
7617 #endif
7618 
7619         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7620             return -TARGET_EFAULT;
7621         memset(target_st, 0, sizeof(*target_st));
7622         __put_user(host_st->st_dev, &target_st->st_dev);
7623         __put_user(host_st->st_ino, &target_st->st_ino);
7624 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7625         __put_user(host_st->st_ino, &target_st->__st_ino);
7626 #endif
7627         __put_user(host_st->st_mode, &target_st->st_mode);
7628         __put_user(host_st->st_nlink, &target_st->st_nlink);
7629         __put_user(host_st->st_uid, &target_st->st_uid);
7630         __put_user(host_st->st_gid, &target_st->st_gid);
7631         __put_user(host_st->st_rdev, &target_st->st_rdev);
7632         /* XXX: better use of kernel struct */
7633         __put_user(host_st->st_size, &target_st->st_size);
7634         __put_user(host_st->st_blksize, &target_st->st_blksize);
7635         __put_user(host_st->st_blocks, &target_st->st_blocks);
7636         __put_user(host_st->st_atime, &target_st->target_st_atime);
7637         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7638         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7639 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7640         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7641         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7642         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7643 #endif
7644         unlock_user_struct(target_st, target_addr, 1);
7645     }
7646 
7647     return 0;
7648 }
7649 #endif
7650 
7651 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7652 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7653                                             abi_ulong target_addr)
7654 {
7655     struct target_statx *target_stx;
7656 
7657     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7658         return -TARGET_EFAULT;
7659     }
7660     memset(target_stx, 0, sizeof(*target_stx));
7661 
7662     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7663     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7664     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7665     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7666     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7667     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7668     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7669     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7670     __put_user(host_stx->stx_size, &target_stx->stx_size);
7671     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7672     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7673     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7674     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7675     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7676     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7677     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7678     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7679     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7680     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7681     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7682     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7683     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7684     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7685 
7686     unlock_user_struct(target_stx, target_addr, 1);
7687 
7688     return 0;
7689 }
7690 #endif
7691 
7692 static int do_sys_futex(int *uaddr, int op, int val,
7693                          const struct timespec *timeout, int *uaddr2,
7694                          int val3)
7695 {
7696 #if HOST_LONG_BITS == 64
7697 #if defined(__NR_futex)
7698     /* the host time_t is always 64-bit; no _time64 variant is defined */
7699     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7700 
7701 #endif
7702 #else /* HOST_LONG_BITS == 64 */
7703 #if defined(__NR_futex_time64)
7704     if (sizeof(timeout->tv_sec) == 8) {
7705         /* _time64 function on 32bit arch */
7706         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7707     }
7708 #endif
7709 #if defined(__NR_futex)
7710     /* old function on 32bit arch */
7711     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7712 #endif
7713 #endif /* HOST_LONG_BITS == 64 */
7714     g_assert_not_reached();
7715 }
7716 
7717 static int do_safe_futex(int *uaddr, int op, int val,
7718                          const struct timespec *timeout, int *uaddr2,
7719                          int val3)
7720 {
7721 #if HOST_LONG_BITS == 64
7722 #if defined(__NR_futex)
7723     /* the host time_t is always 64-bit; no _time64 variant is defined */
7724     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7725 #endif
7726 #else /* HOST_LONG_BITS == 64 */
7727 #if defined(__NR_futex_time64)
7728     if (sizeof(timeout->tv_sec) == 8) {
7729         /* _time64 function on 32bit arch */
7730         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7731                                            val3));
7732     }
7733 #endif
7734 #if defined(__NR_futex)
7735     /* old function on 32bit arch */
7736     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7737 #endif
7738 #endif /* HOST_LONG_BITS == 64 */
7739     return -TARGET_ENOSYS;
7740 }
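
/*
 * Editorial note (not part of the original source): the two helpers above
 * pick a futex flavour as follows.  On a 64-bit host the plain __NR_futex
 * syscall already takes a 64-bit time_t, so it is used unconditionally.
 * On a 32-bit host, __NR_futex_time64 is preferred when the host's
 * struct timespec carries a 64-bit tv_sec, otherwise the legacy
 * __NR_futex is used.
 */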
7741 
7742 /* ??? Using host futex calls even when target atomic operations
7743    are not really atomic probably breaks things.  However, implementing
7744    futexes locally would make futexes shared between multiple processes
7745    tricky.  In any case they would probably be useless, because guest
7746    atomic operations won't work either.  */
7747 #if defined(TARGET_NR_futex)
7748 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7749                     target_ulong timeout, target_ulong uaddr2, int val3)
7750 {
7751     struct timespec ts, *pts;
7752     int base_op;
7753 
7754     /* ??? We assume FUTEX_* constants are the same on both host
7755        and target.  */
7756 #ifdef FUTEX_CMD_MASK
7757     base_op = op & FUTEX_CMD_MASK;
7758 #else
7759     base_op = op;
7760 #endif
7761     switch (base_op) {
7762     case FUTEX_WAIT:
7763     case FUTEX_WAIT_BITSET:
7764         if (timeout) {
7765             pts = &ts;
7766             if (target_to_host_timespec(pts, timeout)) {
                 return -TARGET_EFAULT;
             }
7767         } else {
7768             pts = NULL;
7769         }
7770         return do_safe_futex(g2h(cpu, uaddr),
7771                              op, tswap32(val), pts, NULL, val3);
7772     case FUTEX_WAKE:
7773         return do_safe_futex(g2h(cpu, uaddr),
7774                              op, val, NULL, NULL, 0);
7775     case FUTEX_FD:
7776         return do_safe_futex(g2h(cpu, uaddr),
7777                              op, val, NULL, NULL, 0);
7778     case FUTEX_REQUEUE:
7779     case FUTEX_CMP_REQUEUE:
7780     case FUTEX_WAKE_OP:
7781         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7782            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7783            But the prototype takes a `struct timespec *'; insert casts
7784            to satisfy the compiler.  We do not need to tswap TIMEOUT
7785            since it's not compared to guest memory.  */
7786         pts = (struct timespec *)(uintptr_t) timeout;
7787         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7788                              (base_op == FUTEX_CMP_REQUEUE
7789                               ? tswap32(val3) : val3));
7790     default:
7791         return -TARGET_ENOSYS;
7792     }
7793 }
7794 #endif
7795 
7796 #if defined(TARGET_NR_futex_time64)
7797 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7798                            int val, target_ulong timeout,
7799                            target_ulong uaddr2, int val3)
7800 {
7801     struct timespec ts, *pts;
7802     int base_op;
7803 
7804     /* ??? We assume FUTEX_* constants are the same on both host
7805        and target.  */
7806 #ifdef FUTEX_CMD_MASK
7807     base_op = op & FUTEX_CMD_MASK;
7808 #else
7809     base_op = op;
7810 #endif
7811     switch (base_op) {
7812     case FUTEX_WAIT:
7813     case FUTEX_WAIT_BITSET:
7814         if (timeout) {
7815             pts = &ts;
7816             if (target_to_host_timespec64(pts, timeout)) {
7817                 return -TARGET_EFAULT;
7818             }
7819         } else {
7820             pts = NULL;
7821         }
7822         return do_safe_futex(g2h(cpu, uaddr), op,
7823                              tswap32(val), pts, NULL, val3);
7824     case FUTEX_WAKE:
7825         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7826     case FUTEX_FD:
7827         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7828     case FUTEX_REQUEUE:
7829     case FUTEX_CMP_REQUEUE:
7830     case FUTEX_WAKE_OP:
7831         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7832            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7833            But the prototype takes a `struct timespec *'; insert casts
7834            to satisfy the compiler.  We do not need to tswap TIMEOUT
7835            since it's not compared to guest memory.  */
7836         pts = (struct timespec *)(uintptr_t) timeout;
7837         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7838                              (base_op == FUTEX_CMP_REQUEUE
7839                               ? tswap32(val3) : val3));
7840     default:
7841         return -TARGET_ENOSYS;
7842     }
7843 }
7844 #endif
7845 
7846 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7847 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7848                                      abi_long handle, abi_long mount_id,
7849                                      abi_long flags)
7850 {
7851     struct file_handle *target_fh;
7852     struct file_handle *fh;
7853     int mid = 0;
7854     abi_long ret;
7855     char *name;
7856     unsigned int size, total_size;
7857 
7858     if (get_user_s32(size, handle)) {
7859         return -TARGET_EFAULT;
7860     }
7861 
7862     name = lock_user_string(pathname);
7863     if (!name) {
7864         return -TARGET_EFAULT;
7865     }
7866 
7867     total_size = sizeof(struct file_handle) + size;
7868     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7869     if (!target_fh) {
7870         unlock_user(name, pathname, 0);
7871         return -TARGET_EFAULT;
7872     }
7873 
7874     fh = g_malloc0(total_size);
7875     fh->handle_bytes = size;
7876 
7877     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7878     unlock_user(name, pathname, 0);
7879 
7880     /* man name_to_handle_at(2):
7881      * Other than the use of the handle_bytes field, the caller should treat
7882      * the file_handle structure as an opaque data type
7883      */
7884 
7885     memcpy(target_fh, fh, total_size);
7886     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7887     target_fh->handle_type = tswap32(fh->handle_type);
7888     g_free(fh);
7889     unlock_user(target_fh, handle, total_size);
7890 
7891     if (put_user_s32(mid, mount_id)) {
7892         return -TARGET_EFAULT;
7893     }
7894 
7895     return ret;
7896 
7897 }
7898 #endif
7899 
7900 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7901 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7902                                      abi_long flags)
7903 {
7904     struct file_handle *target_fh;
7905     struct file_handle *fh;
7906     unsigned int size, total_size;
7907     abi_long ret;
7908 
7909     if (get_user_s32(size, handle)) {
7910         return -TARGET_EFAULT;
7911     }
7912 
7913     total_size = sizeof(struct file_handle) + size;
7914     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7915     if (!target_fh) {
7916         return -TARGET_EFAULT;
7917     }
7918 
7919     fh = g_memdup(target_fh, total_size);
7920     fh->handle_bytes = size;
7921     fh->handle_type = tswap32(target_fh->handle_type);
7922 
7923     ret = get_errno(open_by_handle_at(mount_fd, fh,
7924                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7925 
7926     g_free(fh);
7927 
7928     unlock_user(target_fh, handle, total_size);
7929 
7930     return ret;
7931 }
7932 #endif
7933 
7934 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7935 
7936 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7937 {
7938     int host_flags;
7939     target_sigset_t *target_mask;
7940     sigset_t host_mask;
7941     abi_long ret;
7942 
7943     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7944         return -TARGET_EINVAL;
7945     }
7946     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7947         return -TARGET_EFAULT;
7948     }
7949 
7950     target_to_host_sigset(&host_mask, target_mask);
7951 
7952     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7953 
7954     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7955     if (ret >= 0) {
7956         fd_trans_register(ret, &target_signalfd_trans);
7957     }
7958 
7959     unlock_user_struct(target_mask, mask, 0);
7960 
7961     return ret;
7962 }
7963 #endif
7964 
7965 /* Map host to target signal numbers for the wait family of syscalls.
7966    Assume all other status bits are the same.  */
7967 int host_to_target_waitstatus(int status)
7968 {
7969     if (WIFSIGNALED(status)) {
7970         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7971     }
7972     if (WIFSTOPPED(status)) {
7973         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7974                | (status & 0xff);
7975     }
7976     return status;
7977 }
7978 
7979 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7980 {
7981     CPUState *cpu = env_cpu(cpu_env);
7982     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7983     int i;
7984 
7985     for (i = 0; i < bprm->argc; i++) {
7986         size_t len = strlen(bprm->argv[i]) + 1;
7987 
7988         if (write(fd, bprm->argv[i], len) != len) {
7989             return -1;
7990         }
7991     }
7992 
7993     return 0;
7994 }
7995 
7996 static int open_self_maps(CPUArchState *cpu_env, int fd)
7997 {
7998     CPUState *cpu = env_cpu(cpu_env);
7999     TaskState *ts = cpu->opaque;
8000     GSList *map_info = read_self_maps();
8001     GSList *s;
8002     int count;
8003 
8004     for (s = map_info; s; s = g_slist_next(s)) {
8005         MapInfo *e = (MapInfo *) s->data;
8006 
8007         if (h2g_valid(e->start)) {
8008             unsigned long min = e->start;
8009             unsigned long max = e->end;
8010             int flags = page_get_flags(h2g(min));
8011             const char *path;
8012 
8013             max = h2g_valid(max - 1) ?
8014                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8015 
8016             if (page_check_range(h2g(min), max - min, flags) == -1) {
8017                 continue;
8018             }
8019 
8020             if (h2g(min) == ts->info->stack_limit) {
8021                 path = "[stack]";
8022             } else {
8023                 path = e->path;
8024             }
8025 
8026             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8027                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8028                             h2g(min), h2g(max - 1) + 1,
8029                             (flags & PAGE_READ) ? 'r' : '-',
8030                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8031                             (flags & PAGE_EXEC) ? 'x' : '-',
8032                             e->is_priv ? 'p' : 's',
8033                             (uint64_t) e->offset, e->dev, e->inode);
8034             if (path) {
8035                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8036             } else {
8037                 dprintf(fd, "\n");
8038             }
8039         }
8040     }
8041 
8042     free_self_maps(map_info);
8043 
8044 #ifdef TARGET_VSYSCALL_PAGE
8045     /*
8046      * We only support execution from the vsyscall page.
8047      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8048      */
8049     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8050                     " --xp 00000000 00:00 0",
8051                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8052     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8053 #endif
8054 
8055     return 0;
8056 }
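
/*
 * Example of a synthesized line (added comment; the addresses, device and
 * inode shown are purely illustrative):
 *   00400000-0047a000 r-xp 00000000 08:01 1234567          /usr/bin/app
 * i.e. guest start/end addresses, permissions derived from the page flags,
 * the host-reported offset, device and inode, and the path padded so that
 * it starts at a fixed column, matching the dprintf calls above.
 */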
8057 
8058 static int open_self_stat(CPUArchState *cpu_env, int fd)
8059 {
8060     CPUState *cpu = env_cpu(cpu_env);
8061     TaskState *ts = cpu->opaque;
8062     g_autoptr(GString) buf = g_string_new(NULL);
8063     int i;
8064 
8065     for (i = 0; i < 44; i++) {
8066         if (i == 0) {
8067             /* pid */
8068             g_string_printf(buf, FMT_pid " ", getpid());
8069         } else if (i == 1) {
8070             /* app name */
8071             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8072             bin = bin ? bin + 1 : ts->bprm->argv[0];
8073             g_string_printf(buf, "(%.15s) ", bin);
8074         } else if (i == 3) {
8075             /* ppid */
8076             g_string_printf(buf, FMT_pid " ", getppid());
8077         } else if (i == 21) {
8078             /* starttime */
8079             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8080         } else if (i == 27) {
8081             /* stack bottom */
8082             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8083         } else {
8084             /* for the rest, there is MasterCard */
8085             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8086         }
8087 
8088         if (write(fd, buf->str, buf->len) != buf->len) {
8089             return -1;
8090         }
8091     }
8092 
8093     return 0;
8094 }
8095 
8096 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8097 {
8098     CPUState *cpu = env_cpu(cpu_env);
8099     TaskState *ts = cpu->opaque;
8100     abi_ulong auxv = ts->info->saved_auxv;
8101     abi_ulong len = ts->info->auxv_len;
8102     char *ptr;
8103 
8104     /*
8105      * The auxiliary vector is stored on the target process's stack.
8106      * Read in the whole auxv vector and copy it out to the file.
8107      */
8108     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8109     if (ptr != NULL) {
8110         while (len > 0) {
8111             ssize_t r;
8112             r = write(fd, ptr, len);
8113             if (r <= 0) {
8114                 break;
8115             }
8116             len -= r;
8117             ptr += r;
8118         }
8119         lseek(fd, 0, SEEK_SET);
8120         unlock_user(ptr, auxv, len);
8121     }
8122 
8123     return 0;
8124 }
8125 
8126 static int is_proc_myself(const char *filename, const char *entry)
8127 {
8128     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8129         filename += strlen("/proc/");
8130         if (!strncmp(filename, "self/", strlen("self/"))) {
8131             filename += strlen("self/");
8132         } else if (*filename >= '1' && *filename <= '9') {
8133             char myself[80];
8134             snprintf(myself, sizeof(myself), "%d/", getpid());
8135             if (!strncmp(filename, myself, strlen(myself))) {
8136                 filename += strlen(myself);
8137             } else {
8138                 return 0;
8139             }
8140         } else {
8141             return 0;
8142         }
8143         if (!strcmp(filename, entry)) {
8144             return 1;
8145         }
8146     }
8147     return 0;
8148 }
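
/*
 * Illustrative calls (added comment), assuming getpid() == 1234:
 *   is_proc_myself("/proc/self/maps", "maps") -> 1
 *   is_proc_myself("/proc/1234/maps", "maps") -> 1
 *   is_proc_myself("/proc/5678/maps", "maps") -> 0   (another process)
 *   is_proc_myself("/proc/meminfo",   "maps") -> 0   (no self/<pid> prefix)
 */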
8149 
8150 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8151     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8152 static int is_proc(const char *filename, const char *entry)
8153 {
8154     return strcmp(filename, entry) == 0;
8155 }
8156 #endif
8157 
8158 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8159 static int open_net_route(CPUArchState *cpu_env, int fd)
8160 {
8161     FILE *fp;
8162     char *line = NULL;
8163     size_t len = 0;
8164     ssize_t read;
8165 
8166     fp = fopen("/proc/net/route", "r");
8167     if (fp == NULL) {
8168         return -1;
8169     }
8170 
8171     /* read header */
8172 
8173     read = getline(&line, &len, fp);
8174     dprintf(fd, "%s", line);
8175 
8176     /* read routes */
8177 
8178     while ((read = getline(&line, &len, fp)) != -1) {
8179         char iface[16];
8180         uint32_t dest, gw, mask;
8181         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8182         int fields;
8183 
8184         fields = sscanf(line,
8185                         "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8186                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8187                         &mask, &mtu, &window, &irtt);
8188         if (fields != 11) {
8189             continue;
8190         }
8191         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8192                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8193                 metric, tswap32(mask), mtu, window, irtt);
8194     }
8195 
8196     free(line);
8197     fclose(fp);
8198 
8199     return 0;
8200 }
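
/*
 * Rationale (added comment): this fake entry only exists when host and
 * guest endianness differ; the destination, gateway and mask words are
 * passed through tswap32() so the hex values read by the guest match what
 * a kernel of the guest's endianness would have printed.
 */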
8201 #endif
8202 
8203 #if defined(TARGET_SPARC)
8204 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8205 {
8206     dprintf(fd, "type\t\t: sun4u\n");
8207     return 0;
8208 }
8209 #endif
8210 
8211 #if defined(TARGET_HPPA)
8212 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8213 {
8214     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8215     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8216     dprintf(fd, "capabilities\t: os32\n");
8217     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8218     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8219     return 0;
8220 }
8221 #endif
8222 
8223 #if defined(TARGET_M68K)
8224 static int open_hardware(CPUArchState *cpu_env, int fd)
8225 {
8226     dprintf(fd, "Model:\t\tqemu-m68k\n");
8227     return 0;
8228 }
8229 #endif
8230 
8231 static int do_openat(CPUArchState *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8232 {
8233     struct fake_open {
8234         const char *filename;
8235         int (*fill)(CPUArchState *cpu_env, int fd);
8236         int (*cmp)(const char *s1, const char *s2);
8237     };
8238     const struct fake_open *fake_open;
8239     static const struct fake_open fakes[] = {
8240         { "maps", open_self_maps, is_proc_myself },
8241         { "stat", open_self_stat, is_proc_myself },
8242         { "auxv", open_self_auxv, is_proc_myself },
8243         { "cmdline", open_self_cmdline, is_proc_myself },
8244 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8245         { "/proc/net/route", open_net_route, is_proc },
8246 #endif
8247 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8248         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8249 #endif
8250 #if defined(TARGET_M68K)
8251         { "/proc/hardware", open_hardware, is_proc },
8252 #endif
8253         { NULL, NULL, NULL }
8254     };
8255 
8256     if (is_proc_myself(pathname, "exe")) {
8257         int execfd = qemu_getauxval(AT_EXECFD);
8258         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8259     }
8260 
8261     for (fake_open = fakes; fake_open->filename; fake_open++) {
8262         if (fake_open->cmp(pathname, fake_open->filename)) {
8263             break;
8264         }
8265     }
8266 
8267     if (fake_open->filename) {
8268         const char *tmpdir;
8269         char filename[PATH_MAX];
8270         int fd, r;
8271 
8272         /* create a temporary file to hold the synthesized contents */
8273         tmpdir = getenv("TMPDIR");
8274         if (!tmpdir)
8275             tmpdir = "/tmp";
8276         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8277         fd = mkstemp(filename);
8278         if (fd < 0) {
8279             return fd;
8280         }
8281         unlink(filename);
8282 
8283         if ((r = fake_open->fill(cpu_env, fd))) {
8284             int e = errno;
8285             close(fd);
8286             errno = e;
8287             return r;
8288         }
8289         lseek(fd, 0, SEEK_SET);
8290 
8291         return fd;
8292     }
8293 
8294     return safe_openat(dirfd, path(pathname), flags, mode);
8295 }
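
/*
 * Usage sketch (added for clarity): a guest open("/proc/self/maps") never
 * reaches the host's procfs.  The matching fake_open entry is found above,
 * a host temporary file is created with mkstemp() and unlink()ed right
 * away, the fill callback (here open_self_maps) writes the synthesized
 * guest view into it, and the rewound descriptor is handed to the guest.
 */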
8296 
8297 #define TIMER_MAGIC 0x0caf0000
8298 #define TIMER_MAGIC_MASK 0xffff0000
8299 
8300 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8301 static target_timer_t get_timer_id(abi_long arg)
8302 {
8303     target_timer_t timerid = arg;
8304 
8305     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8306         return -TARGET_EINVAL;
8307     }
8308 
8309     timerid &= 0xffff;
8310 
8311     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8312         return -TARGET_EINVAL;
8313     }
8314 
8315     return timerid;
8316 }
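
/*
 * Worked example (added comment): timer IDs exposed to the guest are the
 * slot index ORed with TIMER_MAGIC, so
 *   get_timer_id(0x0caf0002) == 2                 (slot 2, if it exists)
 *   get_timer_id(0x12340002) == -TARGET_EINVAL    (magic mismatch)
 * and any decoded index >= ARRAY_SIZE(g_posix_timers) is also rejected.
 */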
8317 
8318 static int target_to_host_cpu_mask(unsigned long *host_mask,
8319                                    size_t host_size,
8320                                    abi_ulong target_addr,
8321                                    size_t target_size)
8322 {
8323     unsigned target_bits = sizeof(abi_ulong) * 8;
8324     unsigned host_bits = sizeof(*host_mask) * 8;
8325     abi_ulong *target_mask;
8326     unsigned i, j;
8327 
8328     assert(host_size >= target_size);
8329 
8330     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8331     if (!target_mask) {
8332         return -TARGET_EFAULT;
8333     }
8334     memset(host_mask, 0, host_size);
8335 
8336     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8337         unsigned bit = i * target_bits;
8338         abi_ulong val;
8339 
8340         __get_user(val, &target_mask[i]);
8341         for (j = 0; j < target_bits; j++, bit++) {
8342             if (val & (1UL << j)) {
8343                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8344             }
8345         }
8346     }
8347 
8348     unlock_user(target_mask, target_addr, 0);
8349     return 0;
8350 }
8351 
8352 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8353                                    size_t host_size,
8354                                    abi_ulong target_addr,
8355                                    size_t target_size)
8356 {
8357     unsigned target_bits = sizeof(abi_ulong) * 8;
8358     unsigned host_bits = sizeof(*host_mask) * 8;
8359     abi_ulong *target_mask;
8360     unsigned i, j;
8361 
8362     assert(host_size >= target_size);
8363 
8364     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8365     if (!target_mask) {
8366         return -TARGET_EFAULT;
8367     }
8368 
8369     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8370         unsigned bit = i * target_bits;
8371         abi_ulong val = 0;
8372 
8373         for (j = 0; j < target_bits; j++, bit++) {
8374             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8375                 val |= 1UL << j;
8376             }
8377         }
8378         __put_user(val, &target_mask[i]);
8379     }
8380 
8381     unlock_user(target_mask, target_addr, target_size);
8382     return 0;
8383 }
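
/*
 * Illustrative example (added comment; assumes a 32-bit guest on a 64-bit
 * host): guest mask words are 32 bits wide and host words 64 bits wide, so
 * guest word 1, bit 3 is overall bit 1 * 32 + 3 = 35, which the loops above
 * place in host word 35 / 64 = 0 at bit position 35 % 64 = 35, and back
 * again in the other direction.
 */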
8384 
8385 #ifdef TARGET_NR_getdents
8386 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8387 {
8388     g_autofree void *hdirp = NULL;
8389     void *tdirp;
8390     int hlen, hoff, toff;
8391     int hreclen, treclen;
8392     off64_t prev_diroff = 0;
8393 
8394     hdirp = g_try_malloc(count);
8395     if (!hdirp) {
8396         return -TARGET_ENOMEM;
8397     }
8398 
8399 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8400     hlen = sys_getdents(dirfd, hdirp, count);
8401 #else
8402     hlen = sys_getdents64(dirfd, hdirp, count);
8403 #endif
8404 
8405     hlen = get_errno(hlen);
8406     if (is_error(hlen)) {
8407         return hlen;
8408     }
8409 
8410     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8411     if (!tdirp) {
8412         return -TARGET_EFAULT;
8413     }
8414 
8415     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8416 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8417         struct linux_dirent *hde = hdirp + hoff;
8418 #else
8419         struct linux_dirent64 *hde = hdirp + hoff;
8420 #endif
8421         struct target_dirent *tde = tdirp + toff;
8422         int namelen;
8423         uint8_t type;
8424 
8425         namelen = strlen(hde->d_name);
8426         hreclen = hde->d_reclen;
8427         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8428         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8429 
8430         if (toff + treclen > count) {
8431             /*
8432              * If the host struct is smaller than the target struct, or
8433              * requires less alignment and thus packs into less space,
8434              * then the host can return more entries than we can pass
8435              * on to the guest.
8436              */
8437             if (toff == 0) {
8438                 toff = -TARGET_EINVAL; /* result buffer is too small */
8439                 break;
8440             }
8441             /*
8442              * Return what we have, resetting the file pointer to the
8443              * location of the first record not returned.
8444              */
8445             lseek64(dirfd, prev_diroff, SEEK_SET);
8446             break;
8447         }
8448 
8449         prev_diroff = hde->d_off;
8450         tde->d_ino = tswapal(hde->d_ino);
8451         tde->d_off = tswapal(hde->d_off);
8452         tde->d_reclen = tswap16(treclen);
8453         memcpy(tde->d_name, hde->d_name, namelen + 1);
8454 
8455         /*
8456          * The getdents type is in what was formerly a padding byte at the
8457          * end of the structure.
8458          */
8459 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8460         type = *((uint8_t *)hde + hreclen - 1);
8461 #else
8462         type = hde->d_type;
8463 #endif
8464         *((uint8_t *)tde + treclen - 1) = type;
8465     }
8466 
8467     unlock_user(tdirp, arg2, toff);
8468     return toff;
8469 }
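
/*
 * Layout note (added comment): each converted record occupies
 * offsetof(struct target_dirent, d_name) + strlen(name) + 2 bytes before
 * alignment -- one byte for the NUL copied together with the name and one
 * for the d_type byte stored at tde + treclen - 1, mirroring the kernel's
 * legacy getdents record layout.
 */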
8470 #endif /* TARGET_NR_getdents */
8471 
8472 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8473 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8474 {
8475     g_autofree void *hdirp = NULL;
8476     void *tdirp;
8477     int hlen, hoff, toff;
8478     int hreclen, treclen;
8479     off64_t prev_diroff = 0;
8480 
8481     hdirp = g_try_malloc(count);
8482     if (!hdirp) {
8483         return -TARGET_ENOMEM;
8484     }
8485 
8486     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8487     if (is_error(hlen)) {
8488         return hlen;
8489     }
8490 
8491     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8492     if (!tdirp) {
8493         return -TARGET_EFAULT;
8494     }
8495 
8496     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8497         struct linux_dirent64 *hde = hdirp + hoff;
8498         struct target_dirent64 *tde = tdirp + toff;
8499         int namelen;
8500 
8501         namelen = strlen(hde->d_name) + 1;
8502         hreclen = hde->d_reclen;
8503         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8504         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8505 
8506         if (toff + treclen > count) {
8507             /*
8508              * If the host struct is smaller than the target struct, or
8509              * requires less alignment and thus packs into less space,
8510              * then the host can return more entries than we can pass
8511              * on to the guest.
8512              */
8513             if (toff == 0) {
8514                 toff = -TARGET_EINVAL; /* result buffer is too small */
8515                 break;
8516             }
8517             /*
8518              * Return what we have, resetting the file pointer to the
8519              * location of the first record not returned.
8520              */
8521             lseek64(dirfd, prev_diroff, SEEK_SET);
8522             break;
8523         }
8524 
8525         prev_diroff = hde->d_off;
8526         tde->d_ino = tswap64(hde->d_ino);
8527         tde->d_off = tswap64(hde->d_off);
8528         tde->d_reclen = tswap16(treclen);
8529         tde->d_type = hde->d_type;
8530         memcpy(tde->d_name, hde->d_name, namelen);
8531     }
8532 
8533     unlock_user(tdirp, arg2, toff);
8534     return toff;
8535 }
8536 #endif /* TARGET_NR_getdents64 */
8537 
8538 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8539 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8540 #endif
8541 
8542 /* This is an internal helper for do_syscall so that it is easier
8543  * to have a single return point at which actions, such as logging
8544  * of syscall results, can be performed.
8545  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8546  */
8547 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
8548                             abi_long arg2, abi_long arg3, abi_long arg4,
8549                             abi_long arg5, abi_long arg6, abi_long arg7,
8550                             abi_long arg8)
8551 {
8552     CPUState *cpu = env_cpu(cpu_env);
8553     abi_long ret;
8554 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8555     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8556     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8557     || defined(TARGET_NR_statx)
8558     struct stat st;
8559 #endif
8560 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8561     || defined(TARGET_NR_fstatfs)
8562     struct statfs stfs;
8563 #endif
8564     void *p;
8565 
8566     switch(num) {
8567     case TARGET_NR_exit:
8568         /* In old applications this may be used to implement _exit(2).
8569            However in threaded applications it is used for thread termination,
8570            and _exit_group is used for application termination.
8571            Do thread termination if we have more than one thread.  */
8572 
8573         if (block_signals()) {
8574             return -QEMU_ERESTARTSYS;
8575         }
8576 
8577         pthread_mutex_lock(&clone_lock);
8578 
8579         if (CPU_NEXT(first_cpu)) {
8580             TaskState *ts = cpu->opaque;
8581 
8582             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8583             object_unref(OBJECT(cpu));
8584             /*
8585              * At this point the CPU should be unrealized and removed
8586              * from cpu lists. We can clean-up the rest of the thread
8587              * data without the lock held.
8588              */
8589 
8590             pthread_mutex_unlock(&clone_lock);
8591 
8592             if (ts->child_tidptr) {
8593                 put_user_u32(0, ts->child_tidptr);
8594                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8595                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8596             }
8597             thread_cpu = NULL;
8598             g_free(ts);
8599             rcu_unregister_thread();
8600             pthread_exit(NULL);
8601         }
8602 
8603         pthread_mutex_unlock(&clone_lock);
8604         preexit_cleanup(cpu_env, arg1);
8605         _exit(arg1);
8606         return 0; /* avoid warning */
8607     case TARGET_NR_read:
8608         if (arg2 == 0 && arg3 == 0) {
8609             return get_errno(safe_read(arg1, 0, 0));
8610         } else {
8611             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8612                 return -TARGET_EFAULT;
8613             ret = get_errno(safe_read(arg1, p, arg3));
8614             if (ret >= 0 &&
8615                 fd_trans_host_to_target_data(arg1)) {
8616                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8617             }
8618             unlock_user(p, arg2, ret);
8619         }
8620         return ret;
8621     case TARGET_NR_write:
8622         if (arg2 == 0 && arg3 == 0) {
8623             return get_errno(safe_write(arg1, 0, 0));
8624         }
8625         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8626             return -TARGET_EFAULT;
8627         if (fd_trans_target_to_host_data(arg1)) {
8628             void *copy = g_malloc(arg3);
8629             memcpy(copy, p, arg3);
8630             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8631             if (ret >= 0) {
8632                 ret = get_errno(safe_write(arg1, copy, ret));
8633             }
8634             g_free(copy);
8635         } else {
8636             ret = get_errno(safe_write(arg1, p, arg3));
8637         }
8638         unlock_user(p, arg2, 0);
8639         return ret;
8640 
8641 #ifdef TARGET_NR_open
8642     case TARGET_NR_open:
8643         if (!(p = lock_user_string(arg1)))
8644             return -TARGET_EFAULT;
8645         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8646                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8647                                   arg3));
8648         fd_trans_unregister(ret);
8649         unlock_user(p, arg1, 0);
8650         return ret;
8651 #endif
8652     case TARGET_NR_openat:
8653         if (!(p = lock_user_string(arg2)))
8654             return -TARGET_EFAULT;
8655         ret = get_errno(do_openat(cpu_env, arg1, p,
8656                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8657                                   arg4));
8658         fd_trans_unregister(ret);
8659         unlock_user(p, arg2, 0);
8660         return ret;
8661 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8662     case TARGET_NR_name_to_handle_at:
8663         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8664         return ret;
8665 #endif
8666 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8667     case TARGET_NR_open_by_handle_at:
8668         ret = do_open_by_handle_at(arg1, arg2, arg3);
8669         fd_trans_unregister(ret);
8670         return ret;
8671 #endif
8672     case TARGET_NR_close:
8673         fd_trans_unregister(arg1);
8674         return get_errno(close(arg1));
8675 
8676     case TARGET_NR_brk:
8677         return do_brk(arg1);
8678 #ifdef TARGET_NR_fork
8679     case TARGET_NR_fork:
8680         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8681 #endif
8682 #ifdef TARGET_NR_waitpid
8683     case TARGET_NR_waitpid:
8684         {
8685             int status;
8686             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8687             if (!is_error(ret) && arg2 && ret
8688                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8689                 return -TARGET_EFAULT;
8690         }
8691         return ret;
8692 #endif
8693 #ifdef TARGET_NR_waitid
8694     case TARGET_NR_waitid:
8695         {
8696             siginfo_t info;
8697             info.si_pid = 0;
8698             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8699             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8700                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8701                     return -TARGET_EFAULT;
8702                 host_to_target_siginfo(p, &info);
8703                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8704             }
8705         }
8706         return ret;
8707 #endif
8708 #ifdef TARGET_NR_creat /* not on alpha */
8709     case TARGET_NR_creat:
8710         if (!(p = lock_user_string(arg1)))
8711             return -TARGET_EFAULT;
8712         ret = get_errno(creat(p, arg2));
8713         fd_trans_unregister(ret);
8714         unlock_user(p, arg1, 0);
8715         return ret;
8716 #endif
8717 #ifdef TARGET_NR_link
8718     case TARGET_NR_link:
8719         {
8720             void * p2;
8721             p = lock_user_string(arg1);
8722             p2 = lock_user_string(arg2);
8723             if (!p || !p2)
8724                 ret = -TARGET_EFAULT;
8725             else
8726                 ret = get_errno(link(p, p2));
8727             unlock_user(p2, arg2, 0);
8728             unlock_user(p, arg1, 0);
8729         }
8730         return ret;
8731 #endif
8732 #if defined(TARGET_NR_linkat)
8733     case TARGET_NR_linkat:
8734         {
8735             void * p2 = NULL;
8736             if (!arg2 || !arg4)
8737                 return -TARGET_EFAULT;
8738             p  = lock_user_string(arg2);
8739             p2 = lock_user_string(arg4);
8740             if (!p || !p2)
8741                 ret = -TARGET_EFAULT;
8742             else
8743                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8744             unlock_user(p, arg2, 0);
8745             unlock_user(p2, arg4, 0);
8746         }
8747         return ret;
8748 #endif
8749 #ifdef TARGET_NR_unlink
8750     case TARGET_NR_unlink:
8751         if (!(p = lock_user_string(arg1)))
8752             return -TARGET_EFAULT;
8753         ret = get_errno(unlink(p));
8754         unlock_user(p, arg1, 0);
8755         return ret;
8756 #endif
8757 #if defined(TARGET_NR_unlinkat)
8758     case TARGET_NR_unlinkat:
8759         if (!(p = lock_user_string(arg2)))
8760             return -TARGET_EFAULT;
8761         ret = get_errno(unlinkat(arg1, p, arg3));
8762         unlock_user(p, arg2, 0);
8763         return ret;
8764 #endif
8765     case TARGET_NR_execve:
8766         {
8767             char **argp, **envp;
8768             int argc, envc;
8769             abi_ulong gp;
8770             abi_ulong guest_argp;
8771             abi_ulong guest_envp;
8772             abi_ulong addr;
8773             char **q;
8774 
8775             argc = 0;
8776             guest_argp = arg2;
8777             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8778                 if (get_user_ual(addr, gp))
8779                     return -TARGET_EFAULT;
8780                 if (!addr)
8781                     break;
8782                 argc++;
8783             }
8784             envc = 0;
8785             guest_envp = arg3;
8786             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8787                 if (get_user_ual(addr, gp))
8788                     return -TARGET_EFAULT;
8789                 if (!addr)
8790                     break;
8791                 envc++;
8792             }
8793 
8794             argp = g_new0(char *, argc + 1);
8795             envp = g_new0(char *, envc + 1);
8796 
8797             for (gp = guest_argp, q = argp; gp;
8798                   gp += sizeof(abi_ulong), q++) {
8799                 if (get_user_ual(addr, gp))
8800                     goto execve_efault;
8801                 if (!addr)
8802                     break;
8803                 if (!(*q = lock_user_string(addr)))
8804                     goto execve_efault;
8805             }
8806             *q = NULL;
8807 
8808             for (gp = guest_envp, q = envp; gp;
8809                   gp += sizeof(abi_ulong), q++) {
8810                 if (get_user_ual(addr, gp))
8811                     goto execve_efault;
8812                 if (!addr)
8813                     break;
8814                 if (!(*q = lock_user_string(addr)))
8815                     goto execve_efault;
8816             }
8817             *q = NULL;
8818 
8819             if (!(p = lock_user_string(arg1)))
8820                 goto execve_efault;
8821             /* Although execve() is not an interruptible syscall it is
8822              * a special case where we must use the safe_syscall wrapper:
8823              * if we allow a signal to happen before we make the host
8824              * syscall then we will 'lose' it, because at the point of
8825              * execve the process leaves QEMU's control. So we use the
8826              * safe syscall wrapper to ensure that we either take the
8827              * signal as a guest signal, or else it does not happen
8828              * before the execve completes and makes it the other
8829              * program's problem.
8830              */
8831             ret = get_errno(safe_execve(p, argp, envp));
8832             unlock_user(p, arg1, 0);
8833 
8834             goto execve_end;
8835 
8836         execve_efault:
8837             ret = -TARGET_EFAULT;
8838 
8839         execve_end:
8840             for (gp = guest_argp, q = argp; *q;
8841                   gp += sizeof(abi_ulong), q++) {
8842                 if (get_user_ual(addr, gp)
8843                     || !addr)
8844                     break;
8845                 unlock_user(*q, addr, 0);
8846             }
8847             for (gp = guest_envp, q = envp; *q;
8848                   gp += sizeof(abi_ulong), q++) {
8849                 if (get_user_ual(addr, gp)
8850                     || !addr)
8851                     break;
8852                 unlock_user(*q, addr, 0);
8853             }
8854 
8855             g_free(argp);
8856             g_free(envp);
8857         }
8858         return ret;
8859     case TARGET_NR_chdir:
8860         if (!(p = lock_user_string(arg1)))
8861             return -TARGET_EFAULT;
8862         ret = get_errno(chdir(p));
8863         unlock_user(p, arg1, 0);
8864         return ret;
8865 #ifdef TARGET_NR_time
8866     case TARGET_NR_time:
8867         {
8868             time_t host_time;
8869             ret = get_errno(time(&host_time));
8870             if (!is_error(ret)
8871                 && arg1
8872                 && put_user_sal(host_time, arg1))
8873                 return -TARGET_EFAULT;
8874         }
8875         return ret;
8876 #endif
8877 #ifdef TARGET_NR_mknod
8878     case TARGET_NR_mknod:
8879         if (!(p = lock_user_string(arg1)))
8880             return -TARGET_EFAULT;
8881         ret = get_errno(mknod(p, arg2, arg3));
8882         unlock_user(p, arg1, 0);
8883         return ret;
8884 #endif
8885 #if defined(TARGET_NR_mknodat)
8886     case TARGET_NR_mknodat:
8887         if (!(p = lock_user_string(arg2)))
8888             return -TARGET_EFAULT;
8889         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8890         unlock_user(p, arg2, 0);
8891         return ret;
8892 #endif
8893 #ifdef TARGET_NR_chmod
8894     case TARGET_NR_chmod:
8895         if (!(p = lock_user_string(arg1)))
8896             return -TARGET_EFAULT;
8897         ret = get_errno(chmod(p, arg2));
8898         unlock_user(p, arg1, 0);
8899         return ret;
8900 #endif
8901 #ifdef TARGET_NR_lseek
8902     case TARGET_NR_lseek:
8903         return get_errno(lseek(arg1, arg2, arg3));
8904 #endif
8905 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8906     /* Alpha specific */
8907     case TARGET_NR_getxpid:
8908         cpu_env->ir[IR_A4] = getppid();
8909         return get_errno(getpid());
8910 #endif
8911 #ifdef TARGET_NR_getpid
8912     case TARGET_NR_getpid:
8913         return get_errno(getpid());
8914 #endif
8915     case TARGET_NR_mount:
8916         {
8917             /* need to look at the data field */
8918             void *p2, *p3;
8919 
8920             if (arg1) {
8921                 p = lock_user_string(arg1);
8922                 if (!p) {
8923                     return -TARGET_EFAULT;
8924                 }
8925             } else {
8926                 p = NULL;
8927             }
8928 
8929             p2 = lock_user_string(arg2);
8930             if (!p2) {
8931                 if (arg1) {
8932                     unlock_user(p, arg1, 0);
8933                 }
8934                 return -TARGET_EFAULT;
8935             }
8936 
8937             if (arg3) {
8938                 p3 = lock_user_string(arg3);
8939                 if (!p3) {
8940                     if (arg1) {
8941                         unlock_user(p, arg1, 0);
8942                     }
8943                     unlock_user(p2, arg2, 0);
8944                     return -TARGET_EFAULT;
8945                 }
8946             } else {
8947                 p3 = NULL;
8948             }
8949 
8950             /* FIXME - arg5 should be locked, but it isn't clear how to
8951              * do that since it's not guaranteed to be a NULL-terminated
8952              * string.
8953              */
8954             if (!arg5) {
8955                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8956             } else {
8957                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8958             }
8959             ret = get_errno(ret);
8960 
8961             if (arg1) {
8962                 unlock_user(p, arg1, 0);
8963             }
8964             unlock_user(p2, arg2, 0);
8965             if (arg3) {
8966                 unlock_user(p3, arg3, 0);
8967             }
8968         }
8969         return ret;
8970 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8971 #if defined(TARGET_NR_umount)
8972     case TARGET_NR_umount:
8973 #endif
8974 #if defined(TARGET_NR_oldumount)
8975     case TARGET_NR_oldumount:
8976 #endif
8977         if (!(p = lock_user_string(arg1)))
8978             return -TARGET_EFAULT;
8979         ret = get_errno(umount(p));
8980         unlock_user(p, arg1, 0);
8981         return ret;
8982 #endif
8983 #ifdef TARGET_NR_stime /* not on alpha */
8984     case TARGET_NR_stime:
8985         {
8986             struct timespec ts;
8987             ts.tv_nsec = 0;
8988             if (get_user_sal(ts.tv_sec, arg1)) {
8989                 return -TARGET_EFAULT;
8990             }
8991             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8992         }
8993 #endif
8994 #ifdef TARGET_NR_alarm /* not on alpha */
8995     case TARGET_NR_alarm:
8996         return alarm(arg1);
8997 #endif
8998 #ifdef TARGET_NR_pause /* not on alpha */
8999     case TARGET_NR_pause:
9000         if (!block_signals()) {
9001             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
9002         }
9003         return -TARGET_EINTR;
9004 #endif
9005 #ifdef TARGET_NR_utime
9006     case TARGET_NR_utime:
9007         {
9008             struct utimbuf tbuf, *host_tbuf;
9009             struct target_utimbuf *target_tbuf;
9010             if (arg2) {
9011                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9012                     return -TARGET_EFAULT;
9013                 tbuf.actime = tswapal(target_tbuf->actime);
9014                 tbuf.modtime = tswapal(target_tbuf->modtime);
9015                 unlock_user_struct(target_tbuf, arg2, 0);
9016                 host_tbuf = &tbuf;
9017             } else {
9018                 host_tbuf = NULL;
9019             }
9020             if (!(p = lock_user_string(arg1)))
9021                 return -TARGET_EFAULT;
9022             ret = get_errno(utime(p, host_tbuf));
9023             unlock_user(p, arg1, 0);
9024         }
9025         return ret;
9026 #endif
9027 #ifdef TARGET_NR_utimes
9028     case TARGET_NR_utimes:
9029         {
9030             struct timeval *tvp, tv[2];
9031             if (arg2) {
9032                 if (copy_from_user_timeval(&tv[0], arg2)
9033                     || copy_from_user_timeval(&tv[1],
9034                                               arg2 + sizeof(struct target_timeval)))
9035                     return -TARGET_EFAULT;
9036                 tvp = tv;
9037             } else {
9038                 tvp = NULL;
9039             }
9040             if (!(p = lock_user_string(arg1)))
9041                 return -TARGET_EFAULT;
9042             ret = get_errno(utimes(p, tvp));
9043             unlock_user(p, arg1, 0);
9044         }
9045         return ret;
9046 #endif
9047 #if defined(TARGET_NR_futimesat)
9048     case TARGET_NR_futimesat:
9049         {
9050             struct timeval *tvp, tv[2];
9051             if (arg3) {
9052                 if (copy_from_user_timeval(&tv[0], arg3)
9053                     || copy_from_user_timeval(&tv[1],
9054                                               arg3 + sizeof(struct target_timeval)))
9055                     return -TARGET_EFAULT;
9056                 tvp = tv;
9057             } else {
9058                 tvp = NULL;
9059             }
9060             if (!(p = lock_user_string(arg2))) {
9061                 return -TARGET_EFAULT;
9062             }
9063             ret = get_errno(futimesat(arg1, path(p), tvp));
9064             unlock_user(p, arg2, 0);
9065         }
9066         return ret;
9067 #endif
9068 #ifdef TARGET_NR_access
9069     case TARGET_NR_access:
9070         if (!(p = lock_user_string(arg1))) {
9071             return -TARGET_EFAULT;
9072         }
9073         ret = get_errno(access(path(p), arg2));
9074         unlock_user(p, arg1, 0);
9075         return ret;
9076 #endif
9077 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9078     case TARGET_NR_faccessat:
9079         if (!(p = lock_user_string(arg2))) {
9080             return -TARGET_EFAULT;
9081         }
9082         ret = get_errno(faccessat(arg1, p, arg3, 0));
9083         unlock_user(p, arg2, 0);
9084         return ret;
9085 #endif
9086 #ifdef TARGET_NR_nice /* not on alpha */
9087     case TARGET_NR_nice:
9088         return get_errno(nice(arg1));
9089 #endif
9090     case TARGET_NR_sync:
9091         sync();
9092         return 0;
9093 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9094     case TARGET_NR_syncfs:
9095         return get_errno(syncfs(arg1));
9096 #endif
9097     case TARGET_NR_kill:
9098         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9099 #ifdef TARGET_NR_rename
9100     case TARGET_NR_rename:
9101         {
9102             void *p2;
9103             p = lock_user_string(arg1);
9104             p2 = lock_user_string(arg2);
9105             if (!p || !p2)
9106                 ret = -TARGET_EFAULT;
9107             else
9108                 ret = get_errno(rename(p, p2));
9109             unlock_user(p2, arg2, 0);
9110             unlock_user(p, arg1, 0);
9111         }
9112         return ret;
9113 #endif
9114 #if defined(TARGET_NR_renameat)
9115     case TARGET_NR_renameat:
9116         {
9117             void *p2;
9118             p  = lock_user_string(arg2);
9119             p2 = lock_user_string(arg4);
9120             if (!p || !p2)
9121                 ret = -TARGET_EFAULT;
9122             else
9123                 ret = get_errno(renameat(arg1, p, arg3, p2));
9124             unlock_user(p2, arg4, 0);
9125             unlock_user(p, arg2, 0);
9126         }
9127         return ret;
9128 #endif
9129 #if defined(TARGET_NR_renameat2)
9130     case TARGET_NR_renameat2:
9131         {
9132             void *p2;
9133             p  = lock_user_string(arg2);
9134             p2 = lock_user_string(arg4);
9135             if (!p || !p2) {
9136                 ret = -TARGET_EFAULT;
9137             } else {
9138                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9139             }
9140             unlock_user(p2, arg4, 0);
9141             unlock_user(p, arg2, 0);
9142         }
9143         return ret;
9144 #endif
9145 #ifdef TARGET_NR_mkdir
9146     case TARGET_NR_mkdir:
9147         if (!(p = lock_user_string(arg1)))
9148             return -TARGET_EFAULT;
9149         ret = get_errno(mkdir(p, arg2));
9150         unlock_user(p, arg1, 0);
9151         return ret;
9152 #endif
9153 #if defined(TARGET_NR_mkdirat)
9154     case TARGET_NR_mkdirat:
9155         if (!(p = lock_user_string(arg2)))
9156             return -TARGET_EFAULT;
9157         ret = get_errno(mkdirat(arg1, p, arg3));
9158         unlock_user(p, arg2, 0);
9159         return ret;
9160 #endif
9161 #ifdef TARGET_NR_rmdir
9162     case TARGET_NR_rmdir:
9163         if (!(p = lock_user_string(arg1)))
9164             return -TARGET_EFAULT;
9165         ret = get_errno(rmdir(p));
9166         unlock_user(p, arg1, 0);
9167         return ret;
9168 #endif
9169     case TARGET_NR_dup:
9170         ret = get_errno(dup(arg1));
9171         if (ret >= 0) {
9172             fd_trans_dup(arg1, ret);
9173         }
9174         return ret;
9175 #ifdef TARGET_NR_pipe
9176     case TARGET_NR_pipe:
9177         return do_pipe(cpu_env, arg1, 0, 0);
9178 #endif
9179 #ifdef TARGET_NR_pipe2
9180     case TARGET_NR_pipe2:
9181         return do_pipe(cpu_env, arg1,
9182                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9183 #endif
9184     case TARGET_NR_times:
9185         {
9186             struct target_tms *tmsp;
9187             struct tms tms;
9188             ret = get_errno(times(&tms));
9189             if (arg1) {
9190                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9191                 if (!tmsp)
9192                     return -TARGET_EFAULT;
9193                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9194                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9195                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9196                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9197             }
9198             if (!is_error(ret))
9199                 ret = host_to_target_clock_t(ret);
9200         }
9201         return ret;
9202     case TARGET_NR_acct:
9203         if (arg1 == 0) {
9204             ret = get_errno(acct(NULL));
9205         } else {
9206             if (!(p = lock_user_string(arg1))) {
9207                 return -TARGET_EFAULT;
9208             }
9209             ret = get_errno(acct(path(p)));
9210             unlock_user(p, arg1, 0);
9211         }
9212         return ret;
9213 #ifdef TARGET_NR_umount2
9214     case TARGET_NR_umount2:
9215         if (!(p = lock_user_string(arg1)))
9216             return -TARGET_EFAULT;
9217         ret = get_errno(umount2(p, arg2));
9218         unlock_user(p, arg1, 0);
9219         return ret;
9220 #endif
9221     case TARGET_NR_ioctl:
9222         return do_ioctl(arg1, arg2, arg3);
9223 #ifdef TARGET_NR_fcntl
9224     case TARGET_NR_fcntl:
9225         return do_fcntl(arg1, arg2, arg3);
9226 #endif
9227     case TARGET_NR_setpgid:
9228         return get_errno(setpgid(arg1, arg2));
9229     case TARGET_NR_umask:
9230         return get_errno(umask(arg1));
9231     case TARGET_NR_chroot:
9232         if (!(p = lock_user_string(arg1)))
9233             return -TARGET_EFAULT;
9234         ret = get_errno(chroot(p));
9235         unlock_user(p, arg1, 0);
9236         return ret;
9237 #ifdef TARGET_NR_dup2
9238     case TARGET_NR_dup2:
9239         ret = get_errno(dup2(arg1, arg2));
9240         if (ret >= 0) {
9241             fd_trans_dup(arg1, arg2);
9242         }
9243         return ret;
9244 #endif
9245 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9246     case TARGET_NR_dup3:
9247     {
9248         int host_flags;
9249 
9250         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9251             return -TARGET_EINVAL;
9252         }
9253         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9254         ret = get_errno(dup3(arg1, arg2, host_flags));
9255         if (ret >= 0) {
9256             fd_trans_dup(arg1, arg2);
9257         }
9258         return ret;
9259     }
9260 #endif
9261 #ifdef TARGET_NR_getppid /* not on alpha */
9262     case TARGET_NR_getppid:
9263         return get_errno(getppid());
9264 #endif
9265 #ifdef TARGET_NR_getpgrp
9266     case TARGET_NR_getpgrp:
9267         return get_errno(getpgrp());
9268 #endif
9269     case TARGET_NR_setsid:
9270         return get_errno(setsid());
9271 #ifdef TARGET_NR_sigaction
9272     case TARGET_NR_sigaction:
9273         {
9274 #if defined(TARGET_MIPS)
9275             struct target_sigaction act, oact, *pact, *old_act;
9276 
9277             if (arg2) {
9278                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9279                     return -TARGET_EFAULT;
9280                 act._sa_handler = old_act->_sa_handler;
9281                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9282                 act.sa_flags = old_act->sa_flags;
9283                 unlock_user_struct(old_act, arg2, 0);
9284                 pact = &act;
9285             } else {
9286                 pact = NULL;
9287             }
9288 
9289             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9290 
9291             if (!is_error(ret) && arg3) {
9292                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9293                     return -TARGET_EFAULT;
9294                 old_act->_sa_handler = oact._sa_handler;
9295                 old_act->sa_flags = oact.sa_flags;
9296                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9297                 old_act->sa_mask.sig[1] = 0;
9298                 old_act->sa_mask.sig[2] = 0;
9299                 old_act->sa_mask.sig[3] = 0;
9300                 unlock_user_struct(old_act, arg3, 1);
9301             }
9302 #else
9303             struct target_old_sigaction *old_act;
9304             struct target_sigaction act, oact, *pact;
9305             if (arg2) {
9306                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9307                     return -TARGET_EFAULT;
9308                 act._sa_handler = old_act->_sa_handler;
9309                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9310                 act.sa_flags = old_act->sa_flags;
9311 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9312                 act.sa_restorer = old_act->sa_restorer;
9313 #endif
9314                 unlock_user_struct(old_act, arg2, 0);
9315                 pact = &act;
9316             } else {
9317                 pact = NULL;
9318             }
9319             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9320             if (!is_error(ret) && arg3) {
9321                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9322                     return -TARGET_EFAULT;
9323                 old_act->_sa_handler = oact._sa_handler;
9324                 old_act->sa_mask = oact.sa_mask.sig[0];
9325                 old_act->sa_flags = oact.sa_flags;
9326 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9327                 old_act->sa_restorer = oact.sa_restorer;
9328 #endif
9329                 unlock_user_struct(old_act, arg3, 1);
9330             }
9331 #endif
9332         }
9333         return ret;
9334 #endif
9335     case TARGET_NR_rt_sigaction:
9336         {
9337             /*
9338              * For Alpha and SPARC this is a 5 argument syscall, with
9339              * a 'restorer' parameter which must be copied into the
9340              * sa_restorer field of the sigaction struct.
9341              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9342              * and arg5 is the sigsetsize.
9343              */
9344 #if defined(TARGET_ALPHA)
9345             target_ulong sigsetsize = arg4;
9346             target_ulong restorer = arg5;
9347 #elif defined(TARGET_SPARC)
9348             target_ulong restorer = arg4;
9349             target_ulong sigsetsize = arg5;
9350 #else
9351             target_ulong sigsetsize = arg4;
9352             target_ulong restorer = 0;
9353 #endif
9354             struct target_sigaction *act = NULL;
9355             struct target_sigaction *oact = NULL;
9356 
9357             if (sigsetsize != sizeof(target_sigset_t)) {
9358                 return -TARGET_EINVAL;
9359             }
9360             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9361                 return -TARGET_EFAULT;
9362             }
9363             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9364                 ret = -TARGET_EFAULT;
9365             } else {
9366                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9367                 if (oact) {
9368                     unlock_user_struct(oact, arg3, 1);
9369                 }
9370             }
9371             if (act) {
9372                 unlock_user_struct(act, arg2, 0);
9373             }
9374         }
9375         return ret;
9376 #ifdef TARGET_NR_sgetmask /* not on alpha */
9377     case TARGET_NR_sgetmask:
9378         {
9379             sigset_t cur_set;
9380             abi_ulong target_set;
9381             ret = do_sigprocmask(0, NULL, &cur_set);
9382             if (!ret) {
9383                 host_to_target_old_sigset(&target_set, &cur_set);
9384                 ret = target_set;
9385             }
9386         }
9387         return ret;
9388 #endif
9389 #ifdef TARGET_NR_ssetmask /* not on alpha */
9390     case TARGET_NR_ssetmask:
9391         {
9392             sigset_t set, oset;
9393             abi_ulong target_set = arg1;
9394             target_to_host_old_sigset(&set, &target_set);
9395             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9396             if (!ret) {
9397                 host_to_target_old_sigset(&target_set, &oset);
9398                 ret = target_set;
9399             }
9400         }
9401         return ret;
9402 #endif
9403 #ifdef TARGET_NR_sigprocmask
9404     case TARGET_NR_sigprocmask:
9405         {
9406 #if defined(TARGET_ALPHA)
9407             sigset_t set, oldset;
9408             abi_ulong mask;
9409             int how;
9410 
9411             switch (arg1) {
9412             case TARGET_SIG_BLOCK:
9413                 how = SIG_BLOCK;
9414                 break;
9415             case TARGET_SIG_UNBLOCK:
9416                 how = SIG_UNBLOCK;
9417                 break;
9418             case TARGET_SIG_SETMASK:
9419                 how = SIG_SETMASK;
9420                 break;
9421             default:
9422                 return -TARGET_EINVAL;
9423             }
9424             mask = arg2;
9425             target_to_host_old_sigset(&set, &mask);
9426 
9427             ret = do_sigprocmask(how, &set, &oldset);
9428             if (!is_error(ret)) {
9429                 host_to_target_old_sigset(&mask, &oldset);
9430                 ret = mask;
9431                 cpu_env->ir[IR_V0] = 0; /* force no error */
9432             }
9433 #else
9434             sigset_t set, oldset, *set_ptr;
9435             int how;
9436 
9437             if (arg2) {
9438                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9439                 if (!p) {
9440                     return -TARGET_EFAULT;
9441                 }
9442                 target_to_host_old_sigset(&set, p);
9443                 unlock_user(p, arg2, 0);
9444                 set_ptr = &set;
9445                 switch (arg1) {
9446                 case TARGET_SIG_BLOCK:
9447                     how = SIG_BLOCK;
9448                     break;
9449                 case TARGET_SIG_UNBLOCK:
9450                     how = SIG_UNBLOCK;
9451                     break;
9452                 case TARGET_SIG_SETMASK:
9453                     how = SIG_SETMASK;
9454                     break;
9455                 default:
9456                     return -TARGET_EINVAL;
9457                 }
9458             } else {
9459                 how = 0;
9460                 set_ptr = NULL;
9461             }
9462             ret = do_sigprocmask(how, set_ptr, &oldset);
9463             if (!is_error(ret) && arg3) {
9464                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9465                     return -TARGET_EFAULT;
9466                 host_to_target_old_sigset(p, &oldset);
9467                 unlock_user(p, arg3, sizeof(target_sigset_t));
9468             }
9469 #endif
9470         }
9471         return ret;
9472 #endif
9473     case TARGET_NR_rt_sigprocmask:
9474         {
9475             int how = arg1;
9476             sigset_t set, oldset, *set_ptr;
9477 
9478             if (arg4 != sizeof(target_sigset_t)) {
9479                 return -TARGET_EINVAL;
9480             }
9481 
9482             if (arg2) {
9483                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9484                 if (!p) {
9485                     return -TARGET_EFAULT;
9486                 }
9487                 target_to_host_sigset(&set, p);
9488                 unlock_user(p, arg2, 0);
9489                 set_ptr = &set;
9490                 switch(how) {
9491                 case TARGET_SIG_BLOCK:
9492                     how = SIG_BLOCK;
9493                     break;
9494                 case TARGET_SIG_UNBLOCK:
9495                     how = SIG_UNBLOCK;
9496                     break;
9497                 case TARGET_SIG_SETMASK:
9498                     how = SIG_SETMASK;
9499                     break;
9500                 default:
9501                     return -TARGET_EINVAL;
9502                 }
9503             } else {
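                /*
                 * With no new mask supplied the 'how' argument is unused,
                 * so don't validate it; just pass a NULL set through.
                 */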
9504                 how = 0;
9505                 set_ptr = NULL;
9506             }
9507             ret = do_sigprocmask(how, set_ptr, &oldset);
9508             if (!is_error(ret) && arg3) {
9509                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9510                     return -TARGET_EFAULT;
9511                 host_to_target_sigset(p, &oldset);
9512                 unlock_user(p, arg3, sizeof(target_sigset_t));
9513             }
9514         }
9515         return ret;
9516 #ifdef TARGET_NR_sigpending
9517     case TARGET_NR_sigpending:
9518         {
9519             sigset_t set;
9520             ret = get_errno(sigpending(&set));
9521             if (!is_error(ret)) {
9522                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9523                     return -TARGET_EFAULT;
9524                 host_to_target_old_sigset(p, &set);
9525                 unlock_user(p, arg1, sizeof(target_sigset_t));
9526             }
9527         }
9528         return ret;
9529 #endif
9530     case TARGET_NR_rt_sigpending:
9531         {
9532             sigset_t set;
9533 
9534             /* Yes, this check is >, not != like most. We follow the kernel's
9535              * logic and it does it like this because it implements
9536              * NR_sigpending through the same code path, and in that case
9537              * the old_sigset_t is smaller in size.
9538              */
9539             if (arg2 > sizeof(target_sigset_t)) {
9540                 return -TARGET_EINVAL;
9541             }
9542 
9543             ret = get_errno(sigpending(&set));
9544             if (!is_error(ret)) {
9545                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9546                     return -TARGET_EFAULT;
9547                 host_to_target_sigset(p, &set);
9548                 unlock_user(p, arg1, sizeof(target_sigset_t));
9549             }
9550         }
9551         return ret;
9552 #ifdef TARGET_NR_sigsuspend
9553     case TARGET_NR_sigsuspend:
9554         {
9555             sigset_t *set;
9556 
9557 #if defined(TARGET_ALPHA)
9558             TaskState *ts = cpu->opaque;
9559             /* target_to_host_old_sigset will bswap back */
9560             abi_ulong mask = tswapal(arg1);
9561             set = &ts->sigsuspend_mask;
9562             target_to_host_old_sigset(set, &mask);
9563 #else
9564             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9565             if (ret != 0) {
9566                 return ret;
9567             }
9568 #endif
9569             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9570             finish_sigsuspend_mask(ret);
9571         }
9572         return ret;
9573 #endif
9574     case TARGET_NR_rt_sigsuspend:
9575         {
9576             sigset_t *set;
9577 
9578             ret = process_sigsuspend_mask(&set, arg1, arg2);
9579             if (ret != 0) {
9580                 return ret;
9581             }
9582             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9583             finish_sigsuspend_mask(ret);
9584         }
9585         return ret;
9586 #ifdef TARGET_NR_rt_sigtimedwait
9587     case TARGET_NR_rt_sigtimedwait:
9588         {
9589             sigset_t set;
9590             struct timespec uts, *puts;
9591             siginfo_t uinfo;
9592 
9593             if (arg4 != sizeof(target_sigset_t)) {
9594                 return -TARGET_EINVAL;
9595             }
9596 
9597             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9598                 return -TARGET_EFAULT;
9599             target_to_host_sigset(&set, p);
9600             unlock_user(p, arg1, 0);
9601             if (arg3) {
9602                 puts = &uts;
9603                 if (target_to_host_timespec(puts, arg3)) {
9604                     return -TARGET_EFAULT;
9605                 }
9606             } else {
9607                 puts = NULL;
9608             }
9609             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9610                                                  SIGSET_T_SIZE));
9611             if (!is_error(ret)) {
9612                 if (arg2) {
9613                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9614                                   0);
9615                     if (!p) {
9616                         return -TARGET_EFAULT;
9617                     }
9618                     host_to_target_siginfo(p, &uinfo);
9619                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9620                 }
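                /* The return value is the number of the delivered signal;
                 * map it from host to target numbering. */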
9621                 ret = host_to_target_signal(ret);
9622             }
9623         }
9624         return ret;
9625 #endif
9626 #ifdef TARGET_NR_rt_sigtimedwait_time64
9627     case TARGET_NR_rt_sigtimedwait_time64:
9628         {
9629             sigset_t set;
9630             struct timespec uts, *puts;
9631             siginfo_t uinfo;
9632 
9633             if (arg4 != sizeof(target_sigset_t)) {
9634                 return -TARGET_EINVAL;
9635             }
9636 
9637             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9638             if (!p) {
9639                 return -TARGET_EFAULT;
9640             }
9641             target_to_host_sigset(&set, p);
9642             unlock_user(p, arg1, 0);
9643             if (arg3) {
9644                 puts = &uts;
9645                 if (target_to_host_timespec64(puts, arg3)) {
9646                     return -TARGET_EFAULT;
9647                 }
9648             } else {
9649                 puts = NULL;
9650             }
9651             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9652                                                  SIGSET_T_SIZE));
9653             if (!is_error(ret)) {
9654                 if (arg2) {
9655                     p = lock_user(VERIFY_WRITE, arg2,
9656                                   sizeof(target_siginfo_t), 0);
9657                     if (!p) {
9658                         return -TARGET_EFAULT;
9659                     }
9660                     host_to_target_siginfo(p, &uinfo);
9661                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9662                 }
9663                 ret = host_to_target_signal(ret);
9664             }
9665         }
9666         return ret;
9667 #endif
9668     case TARGET_NR_rt_sigqueueinfo:
9669         {
9670             siginfo_t uinfo;
9671 
9672             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9673             if (!p) {
9674                 return -TARGET_EFAULT;
9675             }
9676             target_to_host_siginfo(&uinfo, p);
9677             unlock_user(p, arg3, 0);
9678             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9679         }
9680         return ret;
9681     case TARGET_NR_rt_tgsigqueueinfo:
9682         {
9683             siginfo_t uinfo;
9684 
9685             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9686             if (!p) {
9687                 return -TARGET_EFAULT;
9688             }
9689             target_to_host_siginfo(&uinfo, p);
9690             unlock_user(p, arg4, 0);
9691             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9692         }
9693         return ret;
9694 #ifdef TARGET_NR_sigreturn
9695     case TARGET_NR_sigreturn:
9696         if (block_signals()) {
9697             return -QEMU_ERESTARTSYS;
9698         }
9699         return do_sigreturn(cpu_env);
9700 #endif
9701     case TARGET_NR_rt_sigreturn:
9702         if (block_signals()) {
9703             return -QEMU_ERESTARTSYS;
9704         }
9705         return do_rt_sigreturn(cpu_env);
9706     case TARGET_NR_sethostname:
9707         if (!(p = lock_user_string(arg1)))
9708             return -TARGET_EFAULT;
9709         ret = get_errno(sethostname(p, arg2));
9710         unlock_user(p, arg1, 0);
9711         return ret;
9712 #ifdef TARGET_NR_setrlimit
9713     case TARGET_NR_setrlimit:
9714         {
9715             int resource = target_to_host_resource(arg1);
9716             struct target_rlimit *target_rlim;
9717             struct rlimit rlim;
9718             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9719                 return -TARGET_EFAULT;
9720             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9721             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9722             unlock_user_struct(target_rlim, arg2, 0);
9723             /*
9724              * If we just passed through resource limit settings for memory then
9725              * they would also apply to QEMU's own allocations, and QEMU will
9726              * crash or hang or die if its allocations fail. Ideally we would
9727              * track the guest allocations in QEMU and apply the limits ourselves.
9728              * For now, just tell the guest the call succeeded but don't actually
9729              * limit anything.
9730              */
9731             if (resource != RLIMIT_AS &&
9732                 resource != RLIMIT_DATA &&
9733                 resource != RLIMIT_STACK) {
9734                 return get_errno(setrlimit(resource, &rlim));
9735             } else {
9736                 return 0;
9737             }
9738         }
9739 #endif
9740 #ifdef TARGET_NR_getrlimit
9741     case TARGET_NR_getrlimit:
9742         {
9743             int resource = target_to_host_resource(arg1);
9744             struct target_rlimit *target_rlim;
9745             struct rlimit rlim;
9746 
9747             ret = get_errno(getrlimit(resource, &rlim));
9748             if (!is_error(ret)) {
9749                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9750                     return -TARGET_EFAULT;
9751                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9752                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9753                 unlock_user_struct(target_rlim, arg2, 1);
9754             }
9755         }
9756         return ret;
9757 #endif
9758     case TARGET_NR_getrusage:
9759         {
9760             struct rusage rusage;
9761             ret = get_errno(getrusage(arg1, &rusage));
9762             if (!is_error(ret)) {
9763                 ret = host_to_target_rusage(arg2, &rusage);
9764             }
9765         }
9766         return ret;
9767 #if defined(TARGET_NR_gettimeofday)
9768     case TARGET_NR_gettimeofday:
9769         {
9770             struct timeval tv;
9771             struct timezone tz;
9772 
9773             ret = get_errno(gettimeofday(&tv, &tz));
9774             if (!is_error(ret)) {
9775                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9776                     return -TARGET_EFAULT;
9777                 }
9778                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9779                     return -TARGET_EFAULT;
9780                 }
9781             }
9782         }
9783         return ret;
9784 #endif
9785 #if defined(TARGET_NR_settimeofday)
9786     case TARGET_NR_settimeofday:
9787         {
9788             struct timeval tv, *ptv = NULL;
9789             struct timezone tz, *ptz = NULL;
9790 
9791             if (arg1) {
9792                 if (copy_from_user_timeval(&tv, arg1)) {
9793                     return -TARGET_EFAULT;
9794                 }
9795                 ptv = &tv;
9796             }
9797 
9798             if (arg2) {
9799                 if (copy_from_user_timezone(&tz, arg2)) {
9800                     return -TARGET_EFAULT;
9801                 }
9802                 ptz = &tz;
9803             }
9804 
9805             return get_errno(settimeofday(ptv, ptz));
9806         }
9807 #endif
9808 #if defined(TARGET_NR_select)
9809     case TARGET_NR_select:
9810 #if defined(TARGET_WANT_NI_OLD_SELECT)
9811         /* Some architectures used to have old_select here
9812          * but now return ENOSYS for it.
9813          */
9814         ret = -TARGET_ENOSYS;
9815 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9816         ret = do_old_select(arg1);
9817 #else
9818         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9819 #endif
9820         return ret;
9821 #endif
9822 #ifdef TARGET_NR_pselect6
9823     case TARGET_NR_pselect6:
9824         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9825 #endif
9826 #ifdef TARGET_NR_pselect6_time64
9827     case TARGET_NR_pselect6_time64:
9828         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9829 #endif
9830 #ifdef TARGET_NR_symlink
9831     case TARGET_NR_symlink:
9832         {
9833             void *p2;
9834             p = lock_user_string(arg1);
9835             p2 = lock_user_string(arg2);
9836             if (!p || !p2)
9837                 ret = -TARGET_EFAULT;
9838             else
9839                 ret = get_errno(symlink(p, p2));
9840             unlock_user(p2, arg2, 0);
9841             unlock_user(p, arg1, 0);
9842         }
9843         return ret;
9844 #endif
9845 #if defined(TARGET_NR_symlinkat)
9846     case TARGET_NR_symlinkat:
9847         {
9848             void *p2;
9849             p  = lock_user_string(arg1);
9850             p2 = lock_user_string(arg3);
9851             if (!p || !p2)
9852                 ret = -TARGET_EFAULT;
9853             else
9854                 ret = get_errno(symlinkat(p, arg2, p2));
9855             unlock_user(p2, arg3, 0);
9856             unlock_user(p, arg1, 0);
9857         }
9858         return ret;
9859 #endif
9860 #ifdef TARGET_NR_readlink
9861     case TARGET_NR_readlink:
9862         {
9863             void *p2;
9864             p = lock_user_string(arg1);
9865             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9866             if (!p || !p2) {
9867                 ret = -TARGET_EFAULT;
9868             } else if (!arg3) {
9869                 /* Short circuit this for the magic exe check. */
9870                 ret = -TARGET_EINVAL;
9871             } else if (is_proc_myself((const char *)p, "exe")) {
9872                 char real[PATH_MAX], *temp;
9873                 temp = realpath(exec_path, real);
9874                 /* Return value is # of bytes that we wrote to the buffer. */
9875                 if (temp == NULL) {
9876                     ret = get_errno(-1);
9877                 } else {
9878                     /* Don't worry about sign mismatch as earlier mapping
9879                      * logic would have thrown a bad address error. */
9880                     ret = MIN(strlen(real), arg3);
9881                     /* We cannot NUL terminate the string. */
9882                     memcpy(p2, real, ret);
9883                 }
9884             } else {
9885                 ret = get_errno(readlink(path(p), p2, arg3));
9886             }
9887             unlock_user(p2, arg2, ret);
9888             unlock_user(p, arg1, 0);
9889         }
9890         return ret;
9891 #endif
9892 #if defined(TARGET_NR_readlinkat)
9893     case TARGET_NR_readlinkat:
9894         {
9895             void *p2;
9896             p  = lock_user_string(arg2);
9897             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9898             if (!p || !p2) {
9899                 ret = -TARGET_EFAULT;
9900             } else if (is_proc_myself((const char *)p, "exe")) {
9901                 char real[PATH_MAX], *temp;
9902                 temp = realpath(exec_path, real);
9903                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9904                 snprintf((char *)p2, arg4, "%s", real);
9905             } else {
9906                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9907             }
9908             unlock_user(p2, arg3, ret);
9909             unlock_user(p, arg2, 0);
9910         }
9911         return ret;
9912 #endif
9913 #ifdef TARGET_NR_swapon
9914     case TARGET_NR_swapon:
9915         if (!(p = lock_user_string(arg1)))
9916             return -TARGET_EFAULT;
9917         ret = get_errno(swapon(p, arg2));
9918         unlock_user(p, arg1, 0);
9919         return ret;
9920 #endif
9921     case TARGET_NR_reboot:
9922         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9923             /* arg4 (the restart command string) is only used with RESTART2 */
9924             p = lock_user_string(arg4);
9925             if (!p) {
9926                 return -TARGET_EFAULT;
9927             }
9928             ret = get_errno(reboot(arg1, arg2, arg3, p));
9929             unlock_user(p, arg4, 0);
9930         } else {
9931             ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9932         }
9933         return ret;
9934 #ifdef TARGET_NR_mmap
9935     case TARGET_NR_mmap:
9936 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9937     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9938     defined(TARGET_M68K) || defined(TARGET_CRIS) || \
9939     defined(TARGET_MICROBLAZE) || defined(TARGET_S390X)
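        /*
         * On these targets the old mmap() syscall takes a pointer (arg1)
         * to a block of six arguments in guest memory rather than passing
         * them in registers.
         */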
9940         {
9941             abi_ulong *v;
9942             abi_ulong v1, v2, v3, v4, v5, v6;
9943             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9944                 return -TARGET_EFAULT;
9945             v1 = tswapal(v[0]);
9946             v2 = tswapal(v[1]);
9947             v3 = tswapal(v[2]);
9948             v4 = tswapal(v[3]);
9949             v5 = tswapal(v[4]);
9950             v6 = tswapal(v[5]);
9951             unlock_user(v, arg1, 0);
9952             ret = get_errno(target_mmap(v1, v2, v3,
9953                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9954                                         v5, v6));
9955         }
9956 #else
9957         /* mmap pointers are always untagged */
9958         ret = get_errno(target_mmap(arg1, arg2, arg3,
9959                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9960                                     arg5,
9961                                     arg6));
9962 #endif
9963         return ret;
9964 #endif
9965 #ifdef TARGET_NR_mmap2
9966     case TARGET_NR_mmap2:
9967 #ifndef MMAP_SHIFT
9968 #define MMAP_SHIFT 12
9969 #endif
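        /*
         * mmap2 passes the file offset in units of 1 << MMAP_SHIFT
         * (4096-byte pages unless the target overrides it), so scale it
         * back up to a byte offset for target_mmap().
         */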
9970         ret = target_mmap(arg1, arg2, arg3,
9971                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9972                           arg5, arg6 << MMAP_SHIFT);
9973         return get_errno(ret);
9974 #endif
9975     case TARGET_NR_munmap:
9976         arg1 = cpu_untagged_addr(cpu, arg1);
9977         return get_errno(target_munmap(arg1, arg2));
9978     case TARGET_NR_mprotect:
9979         arg1 = cpu_untagged_addr(cpu, arg1);
9980         {
9981             TaskState *ts = cpu->opaque;
9982             /* Special hack to detect libc making the stack executable.  */
9983             if ((arg3 & PROT_GROWSDOWN)
9984                 && arg1 >= ts->info->stack_limit
9985                 && arg1 <= ts->info->start_stack) {
9986                 arg3 &= ~PROT_GROWSDOWN;
9987                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9988                 arg1 = ts->info->stack_limit;
9989             }
9990         }
9991         return get_errno(target_mprotect(arg1, arg2, arg3));
9992 #ifdef TARGET_NR_mremap
9993     case TARGET_NR_mremap:
9994         arg1 = cpu_untagged_addr(cpu, arg1);
9995         /* mremap new_addr (arg5) is always untagged */
9996         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9997 #endif
9998         /* ??? msync/mlock/munlock are broken for softmmu.  */
9999 #ifdef TARGET_NR_msync
10000     case TARGET_NR_msync:
10001         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
10002 #endif
10003 #ifdef TARGET_NR_mlock
10004     case TARGET_NR_mlock:
10005         return get_errno(mlock(g2h(cpu, arg1), arg2));
10006 #endif
10007 #ifdef TARGET_NR_munlock
10008     case TARGET_NR_munlock:
10009         return get_errno(munlock(g2h(cpu, arg1), arg2));
10010 #endif
10011 #ifdef TARGET_NR_mlockall
10012     case TARGET_NR_mlockall:
10013         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10014 #endif
10015 #ifdef TARGET_NR_munlockall
10016     case TARGET_NR_munlockall:
10017         return get_errno(munlockall());
10018 #endif
10019 #ifdef TARGET_NR_truncate
10020     case TARGET_NR_truncate:
10021         if (!(p = lock_user_string(arg1)))
10022             return -TARGET_EFAULT;
10023         ret = get_errno(truncate(p, arg2));
10024         unlock_user(p, arg1, 0);
10025         return ret;
10026 #endif
10027 #ifdef TARGET_NR_ftruncate
10028     case TARGET_NR_ftruncate:
10029         return get_errno(ftruncate(arg1, arg2));
10030 #endif
10031     case TARGET_NR_fchmod:
10032         return get_errno(fchmod(arg1, arg2));
10033 #if defined(TARGET_NR_fchmodat)
10034     case TARGET_NR_fchmodat:
10035         if (!(p = lock_user_string(arg2)))
10036             return -TARGET_EFAULT;
10037         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10038         unlock_user(p, arg2, 0);
10039         return ret;
10040 #endif
10041     case TARGET_NR_getpriority:
10042         /* Note that negative values are valid for getpriority, so we must
10043            differentiate based on errno settings.  */
10044         errno = 0;
10045         ret = getpriority(arg1, arg2);
10046         if (ret == -1 && errno != 0) {
10047             return -host_to_target_errno(errno);
10048         }
10049 #ifdef TARGET_ALPHA
10050         /* Return value is the unbiased priority.  Signal no error.  */
10051         cpu_env->ir[IR_V0] = 0;
10052 #else
10053         /* Return value is a biased priority to avoid negative numbers.  */
10054         ret = 20 - ret;
10055 #endif
10056         return ret;
10057     case TARGET_NR_setpriority:
10058         return get_errno(setpriority(arg1, arg2, arg3));
10059 #ifdef TARGET_NR_statfs
10060     case TARGET_NR_statfs:
10061         if (!(p = lock_user_string(arg1))) {
10062             return -TARGET_EFAULT;
10063         }
10064         ret = get_errno(statfs(path(p), &stfs));
10065         unlock_user(p, arg1, 0);
10066     convert_statfs:
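        /* Shared with TARGET_NR_fstatfs, which jumps here after filling stfs. */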
10067         if (!is_error(ret)) {
10068             struct target_statfs *target_stfs;
10069 
10070             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10071                 return -TARGET_EFAULT;
10072             __put_user(stfs.f_type, &target_stfs->f_type);
10073             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10074             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10075             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10076             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10077             __put_user(stfs.f_files, &target_stfs->f_files);
10078             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10079             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10080             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10081             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10082             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10083 #ifdef _STATFS_F_FLAGS
10084             __put_user(stfs.f_flags, &target_stfs->f_flags);
10085 #else
10086             __put_user(0, &target_stfs->f_flags);
10087 #endif
10088             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10089             unlock_user_struct(target_stfs, arg2, 1);
10090         }
10091         return ret;
10092 #endif
10093 #ifdef TARGET_NR_fstatfs
10094     case TARGET_NR_fstatfs:
10095         ret = get_errno(fstatfs(arg1, &stfs));
10096         goto convert_statfs;
10097 #endif
10098 #ifdef TARGET_NR_statfs64
10099     case TARGET_NR_statfs64:
10100         if (!(p = lock_user_string(arg1))) {
10101             return -TARGET_EFAULT;
10102         }
10103         ret = get_errno(statfs(path(p), &stfs));
10104         unlock_user(p, arg1, 0);
10105     convert_statfs64:
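        /* Shared with TARGET_NR_fstatfs64, which jumps here after filling stfs. */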
10106         if (!is_error(ret)) {
10107             struct target_statfs64 *target_stfs;
10108 
10109             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10110                 return -TARGET_EFAULT;
10111             __put_user(stfs.f_type, &target_stfs->f_type);
10112             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10113             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10114             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10115             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10116             __put_user(stfs.f_files, &target_stfs->f_files);
10117             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10118             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10119             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10120             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10121             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10122 #ifdef _STATFS_F_FLAGS
10123             __put_user(stfs.f_flags, &target_stfs->f_flags);
10124 #else
10125             __put_user(0, &target_stfs->f_flags);
10126 #endif
10127             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10128             unlock_user_struct(target_stfs, arg3, 1);
10129         }
10130         return ret;
10131     case TARGET_NR_fstatfs64:
10132         ret = get_errno(fstatfs(arg1, &stfs));
10133         goto convert_statfs64;
10134 #endif
10135 #ifdef TARGET_NR_socketcall
10136     case TARGET_NR_socketcall:
10137         return do_socketcall(arg1, arg2);
10138 #endif
10139 #ifdef TARGET_NR_accept
10140     case TARGET_NR_accept:
10141         return do_accept4(arg1, arg2, arg3, 0);
10142 #endif
10143 #ifdef TARGET_NR_accept4
10144     case TARGET_NR_accept4:
10145         return do_accept4(arg1, arg2, arg3, arg4);
10146 #endif
10147 #ifdef TARGET_NR_bind
10148     case TARGET_NR_bind:
10149         return do_bind(arg1, arg2, arg3);
10150 #endif
10151 #ifdef TARGET_NR_connect
10152     case TARGET_NR_connect:
10153         return do_connect(arg1, arg2, arg3);
10154 #endif
10155 #ifdef TARGET_NR_getpeername
10156     case TARGET_NR_getpeername:
10157         return do_getpeername(arg1, arg2, arg3);
10158 #endif
10159 #ifdef TARGET_NR_getsockname
10160     case TARGET_NR_getsockname:
10161         return do_getsockname(arg1, arg2, arg3);
10162 #endif
10163 #ifdef TARGET_NR_getsockopt
10164     case TARGET_NR_getsockopt:
10165         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10166 #endif
10167 #ifdef TARGET_NR_listen
10168     case TARGET_NR_listen:
10169         return get_errno(listen(arg1, arg2));
10170 #endif
10171 #ifdef TARGET_NR_recv
10172     case TARGET_NR_recv:
10173         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10174 #endif
10175 #ifdef TARGET_NR_recvfrom
10176     case TARGET_NR_recvfrom:
10177         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10178 #endif
10179 #ifdef TARGET_NR_recvmsg
10180     case TARGET_NR_recvmsg:
10181         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10182 #endif
10183 #ifdef TARGET_NR_send
10184     case TARGET_NR_send:
10185         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10186 #endif
10187 #ifdef TARGET_NR_sendmsg
10188     case TARGET_NR_sendmsg:
10189         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10190 #endif
10191 #ifdef TARGET_NR_sendmmsg
10192     case TARGET_NR_sendmmsg:
10193         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10194 #endif
10195 #ifdef TARGET_NR_recvmmsg
10196     case TARGET_NR_recvmmsg:
10197         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10198 #endif
10199 #ifdef TARGET_NR_sendto
10200     case TARGET_NR_sendto:
10201         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10202 #endif
10203 #ifdef TARGET_NR_shutdown
10204     case TARGET_NR_shutdown:
10205         return get_errno(shutdown(arg1, arg2));
10206 #endif
10207 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10208     case TARGET_NR_getrandom:
10209         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10210         if (!p) {
10211             return -TARGET_EFAULT;
10212         }
10213         ret = get_errno(getrandom(p, arg2, arg3));
10214         unlock_user(p, arg1, ret);
10215         return ret;
10216 #endif
10217 #ifdef TARGET_NR_socket
10218     case TARGET_NR_socket:
10219         return do_socket(arg1, arg2, arg3);
10220 #endif
10221 #ifdef TARGET_NR_socketpair
10222     case TARGET_NR_socketpair:
10223         return do_socketpair(arg1, arg2, arg3, arg4);
10224 #endif
10225 #ifdef TARGET_NR_setsockopt
10226     case TARGET_NR_setsockopt:
10227         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10228 #endif
10229 #if defined(TARGET_NR_syslog)
10230     case TARGET_NR_syslog:
10231         {
10232             int len = arg2;
10233 
10234             switch (arg1) {
10235             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10236             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10237             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10238             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10239             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10240             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10241             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10242             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10243                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10244             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10245             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10246             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10247                 {
10248                     if (len < 0) {
10249                         return -TARGET_EINVAL;
10250                     }
10251                     if (len == 0) {
10252                         return 0;
10253                     }
10254                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10255                     if (!p) {
10256                         return -TARGET_EFAULT;
10257                     }
10258                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10259                     unlock_user(p, arg2, arg3);
10260                 }
10261                 return ret;
10262             default:
10263                 return -TARGET_EINVAL;
10264             }
10265         }
10266         break;
10267 #endif
10268     case TARGET_NR_setitimer:
10269         {
10270             struct itimerval value, ovalue, *pvalue;
10271 
10272             if (arg2) {
10273                 pvalue = &value;
10274                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10275                     || copy_from_user_timeval(&pvalue->it_value,
10276                                               arg2 + sizeof(struct target_timeval)))
10277                     return -TARGET_EFAULT;
10278             } else {
10279                 pvalue = NULL;
10280             }
10281             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10282             if (!is_error(ret) && arg3) {
10283                 if (copy_to_user_timeval(arg3,
10284                                          &ovalue.it_interval)
10285                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10286                                             &ovalue.it_value))
10287                     return -TARGET_EFAULT;
10288             }
10289         }
10290         return ret;
10291     case TARGET_NR_getitimer:
10292         {
10293             struct itimerval value;
10294 
10295             ret = get_errno(getitimer(arg1, &value));
10296             if (!is_error(ret) && arg2) {
10297                 if (copy_to_user_timeval(arg2,
10298                                          &value.it_interval)
10299                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10300                                             &value.it_value))
10301                     return -TARGET_EFAULT;
10302             }
10303         }
10304         return ret;
10305 #ifdef TARGET_NR_stat
10306     case TARGET_NR_stat:
10307         if (!(p = lock_user_string(arg1))) {
10308             return -TARGET_EFAULT;
10309         }
10310         ret = get_errno(stat(path(p), &st));
10311         unlock_user(p, arg1, 0);
10312         goto do_stat;
10313 #endif
10314 #ifdef TARGET_NR_lstat
10315     case TARGET_NR_lstat:
10316         if (!(p = lock_user_string(arg1))) {
10317             return -TARGET_EFAULT;
10318         }
10319         ret = get_errno(lstat(path(p), &st));
10320         unlock_user(p, arg1, 0);
10321         goto do_stat;
10322 #endif
10323 #ifdef TARGET_NR_fstat
10324     case TARGET_NR_fstat:
10325         {
10326             ret = get_errno(fstat(arg1, &st));
10327 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10328         do_stat:
10329 #endif
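            /*
             * st has been filled in by stat, lstat or fstat above; copy it
             * out field by field into the target's struct stat layout.
             */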
10330             if (!is_error(ret)) {
10331                 struct target_stat *target_st;
10332 
10333                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10334                     return -TARGET_EFAULT;
10335                 memset(target_st, 0, sizeof(*target_st));
10336                 __put_user(st.st_dev, &target_st->st_dev);
10337                 __put_user(st.st_ino, &target_st->st_ino);
10338                 __put_user(st.st_mode, &target_st->st_mode);
10339                 __put_user(st.st_uid, &target_st->st_uid);
10340                 __put_user(st.st_gid, &target_st->st_gid);
10341                 __put_user(st.st_nlink, &target_st->st_nlink);
10342                 __put_user(st.st_rdev, &target_st->st_rdev);
10343                 __put_user(st.st_size, &target_st->st_size);
10344                 __put_user(st.st_blksize, &target_st->st_blksize);
10345                 __put_user(st.st_blocks, &target_st->st_blocks);
10346                 __put_user(st.st_atime, &target_st->target_st_atime);
10347                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10348                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10349 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10350                 __put_user(st.st_atim.tv_nsec,
10351                            &target_st->target_st_atime_nsec);
10352                 __put_user(st.st_mtim.tv_nsec,
10353                            &target_st->target_st_mtime_nsec);
10354                 __put_user(st.st_ctim.tv_nsec,
10355                            &target_st->target_st_ctime_nsec);
10356 #endif
10357                 unlock_user_struct(target_st, arg2, 1);
10358             }
10359         }
10360         return ret;
10361 #endif
10362     case TARGET_NR_vhangup:
10363         return get_errno(vhangup());
10364 #ifdef TARGET_NR_syscall
10365     case TARGET_NR_syscall:
10366         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10367                           arg6, arg7, arg8, 0);
10368 #endif
10369 #if defined(TARGET_NR_wait4)
10370     case TARGET_NR_wait4:
10371         {
10372             int status;
10373             abi_long status_ptr = arg2;
10374             struct rusage rusage, *rusage_ptr;
10375             abi_ulong target_rusage = arg4;
10376             abi_long rusage_err;
10377             if (target_rusage)
10378                 rusage_ptr = &rusage;
10379             else
10380                 rusage_ptr = NULL;
10381             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10382             if (!is_error(ret)) {
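                /* Only copy the status out if a child was actually reaped
                 * (wait4 returns 0 with WNOHANG when nothing has exited). */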
10383                 if (status_ptr && ret) {
10384                     status = host_to_target_waitstatus(status);
10385                     if (put_user_s32(status, status_ptr))
10386                         return -TARGET_EFAULT;
10387                 }
10388                 if (target_rusage) {
10389                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10390                     if (rusage_err) {
10391                         ret = rusage_err;
10392                     }
10393                 }
10394             }
10395         }
10396         return ret;
10397 #endif
10398 #ifdef TARGET_NR_swapoff
10399     case TARGET_NR_swapoff:
10400         if (!(p = lock_user_string(arg1)))
10401             return -TARGET_EFAULT;
10402         ret = get_errno(swapoff(p));
10403         unlock_user(p, arg1, 0);
10404         return ret;
10405 #endif
10406     case TARGET_NR_sysinfo:
10407         {
10408             struct target_sysinfo *target_value;
10409             struct sysinfo value;
10410             ret = get_errno(sysinfo(&value));
10411             if (!is_error(ret) && arg1)
10412             {
10413                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10414                     return -TARGET_EFAULT;
10415                 __put_user(value.uptime, &target_value->uptime);
10416                 __put_user(value.loads[0], &target_value->loads[0]);
10417                 __put_user(value.loads[1], &target_value->loads[1]);
10418                 __put_user(value.loads[2], &target_value->loads[2]);
10419                 __put_user(value.totalram, &target_value->totalram);
10420                 __put_user(value.freeram, &target_value->freeram);
10421                 __put_user(value.sharedram, &target_value->sharedram);
10422                 __put_user(value.bufferram, &target_value->bufferram);
10423                 __put_user(value.totalswap, &target_value->totalswap);
10424                 __put_user(value.freeswap, &target_value->freeswap);
10425                 __put_user(value.procs, &target_value->procs);
10426                 __put_user(value.totalhigh, &target_value->totalhigh);
10427                 __put_user(value.freehigh, &target_value->freehigh);
10428                 __put_user(value.mem_unit, &target_value->mem_unit);
10429                 unlock_user_struct(target_value, arg1, 1);
10430             }
10431         }
10432         return ret;
10433 #ifdef TARGET_NR_ipc
10434     case TARGET_NR_ipc:
10435         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10436 #endif
10437 #ifdef TARGET_NR_semget
10438     case TARGET_NR_semget:
10439         return get_errno(semget(arg1, arg2, arg3));
10440 #endif
10441 #ifdef TARGET_NR_semop
10442     case TARGET_NR_semop:
10443         return do_semtimedop(arg1, arg2, arg3, 0, false);
10444 #endif
10445 #ifdef TARGET_NR_semtimedop
10446     case TARGET_NR_semtimedop:
10447         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10448 #endif
10449 #ifdef TARGET_NR_semtimedop_time64
10450     case TARGET_NR_semtimedop_time64:
10451         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10452 #endif
10453 #ifdef TARGET_NR_semctl
10454     case TARGET_NR_semctl:
10455         return do_semctl(arg1, arg2, arg3, arg4);
10456 #endif
10457 #ifdef TARGET_NR_msgctl
10458     case TARGET_NR_msgctl:
10459         return do_msgctl(arg1, arg2, arg3);
10460 #endif
10461 #ifdef TARGET_NR_msgget
10462     case TARGET_NR_msgget:
10463         return get_errno(msgget(arg1, arg2));
10464 #endif
10465 #ifdef TARGET_NR_msgrcv
10466     case TARGET_NR_msgrcv:
10467         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10468 #endif
10469 #ifdef TARGET_NR_msgsnd
10470     case TARGET_NR_msgsnd:
10471         return do_msgsnd(arg1, arg2, arg3, arg4);
10472 #endif
10473 #ifdef TARGET_NR_shmget
10474     case TARGET_NR_shmget:
10475         return get_errno(shmget(arg1, arg2, arg3));
10476 #endif
10477 #ifdef TARGET_NR_shmctl
10478     case TARGET_NR_shmctl:
10479         return do_shmctl(arg1, arg2, arg3);
10480 #endif
10481 #ifdef TARGET_NR_shmat
10482     case TARGET_NR_shmat:
10483         return do_shmat(cpu_env, arg1, arg2, arg3);
10484 #endif
10485 #ifdef TARGET_NR_shmdt
10486     case TARGET_NR_shmdt:
10487         return do_shmdt(arg1);
10488 #endif
10489     case TARGET_NR_fsync:
10490         return get_errno(fsync(arg1));
10491     case TARGET_NR_clone:
10492         /* Linux manages to have three different orderings for its
10493          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10494          * match the kernel's CONFIG_CLONE_* settings.
10495          * Microblaze is further special in that it uses a sixth
10496          * implicit argument to clone for the TLS pointer.
10497          */
10498 #if defined(TARGET_MICROBLAZE)
10499         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10500 #elif defined(TARGET_CLONE_BACKWARDS)
10501         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10502 #elif defined(TARGET_CLONE_BACKWARDS2)
10503         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10504 #else
10505         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10506 #endif
10507         return ret;
10508 #ifdef __NR_exit_group
10509         /* new thread calls */
10510     case TARGET_NR_exit_group:
10511         preexit_cleanup(cpu_env, arg1);
10512         return get_errno(exit_group(arg1));
10513 #endif
10514     case TARGET_NR_setdomainname:
10515         if (!(p = lock_user_string(arg1)))
10516             return -TARGET_EFAULT;
10517         ret = get_errno(setdomainname(p, arg2));
10518         unlock_user(p, arg1, 0);
10519         return ret;
10520     case TARGET_NR_uname:
10521         /* no need to transcode because we use the linux syscall */
10522         {
10523             struct new_utsname * buf;
10524 
10525             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10526                 return -TARGET_EFAULT;
10527             ret = get_errno(sys_uname(buf));
10528             if (!is_error(ret)) {
10529                 /* Overwrite the native machine name with whatever is being
10530                    emulated. */
10531                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10532                           sizeof(buf->machine));
10533                 /* Allow the user to override the reported release.  */
10534                 if (qemu_uname_release && *qemu_uname_release) {
10535                     g_strlcpy(buf->release, qemu_uname_release,
10536                               sizeof(buf->release));
10537                 }
10538             }
10539             unlock_user_struct(buf, arg1, 1);
10540         }
10541         return ret;
10542 #ifdef TARGET_I386
10543     case TARGET_NR_modify_ldt:
10544         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10545 #if !defined(TARGET_X86_64)
10546     case TARGET_NR_vm86:
10547         return do_vm86(cpu_env, arg1, arg2);
10548 #endif
10549 #endif
10550 #if defined(TARGET_NR_adjtimex)
10551     case TARGET_NR_adjtimex:
10552         {
10553             struct timex host_buf;
10554 
10555             if (target_to_host_timex(&host_buf, arg1) != 0) {
10556                 return -TARGET_EFAULT;
10557             }
10558             ret = get_errno(adjtimex(&host_buf));
10559             if (!is_error(ret)) {
10560                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10561                     return -TARGET_EFAULT;
10562                 }
10563             }
10564         }
10565         return ret;
10566 #endif
10567 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10568     case TARGET_NR_clock_adjtime:
10569         {
10570             struct timex htx, *phtx = &htx;
10571 
10572             if (target_to_host_timex(phtx, arg2) != 0) {
10573                 return -TARGET_EFAULT;
10574             }
10575             ret = get_errno(clock_adjtime(arg1, phtx));
10576             if (!is_error(ret) && phtx) {
10577                 if (host_to_target_timex(arg2, phtx) != 0) {
10578                     return -TARGET_EFAULT;
10579                 }
10580             }
10581         }
10582         return ret;
10583 #endif
10584 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10585     case TARGET_NR_clock_adjtime64:
10586         {
10587             struct timex htx;
10588 
10589             if (target_to_host_timex64(&htx, arg2) != 0) {
10590                 return -TARGET_EFAULT;
10591             }
10592             ret = get_errno(clock_adjtime(arg1, &htx));
10593             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10594                     return -TARGET_EFAULT;
10595             }
10596         }
10597         return ret;
10598 #endif
10599     case TARGET_NR_getpgid:
10600         return get_errno(getpgid(arg1));
10601     case TARGET_NR_fchdir:
10602         return get_errno(fchdir(arg1));
10603     case TARGET_NR_personality:
10604         return get_errno(personality(arg1));
10605 #ifdef TARGET_NR__llseek /* Not on alpha */
10606     case TARGET_NR__llseek:
10607         {
10608             int64_t res;
10609 #if !defined(__NR_llseek)
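            /* Hosts without an llseek syscall are 64-bit, so a plain lseek
             * with the offset reassembled from its two 32-bit halves
             * (arg2 = high, arg3 = low) is sufficient. */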
10610             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10611             if (res == -1) {
10612                 ret = get_errno(res);
10613             } else {
10614                 ret = 0;
10615             }
10616 #else
10617             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10618 #endif
10619             if ((ret == 0) && put_user_s64(res, arg4)) {
10620                 return -TARGET_EFAULT;
10621             }
10622         }
10623         return ret;
10624 #endif
10625 #ifdef TARGET_NR_getdents
10626     case TARGET_NR_getdents:
10627         return do_getdents(arg1, arg2, arg3);
10628 #endif /* TARGET_NR_getdents */
10629 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10630     case TARGET_NR_getdents64:
10631         return do_getdents64(arg1, arg2, arg3);
10632 #endif /* TARGET_NR_getdents64 */
10633 #if defined(TARGET_NR__newselect)
10634     case TARGET_NR__newselect:
10635         return do_select(arg1, arg2, arg3, arg4, arg5);
10636 #endif
10637 #ifdef TARGET_NR_poll
10638     case TARGET_NR_poll:
10639         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10640 #endif
10641 #ifdef TARGET_NR_ppoll
10642     case TARGET_NR_ppoll:
10643         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10644 #endif
10645 #ifdef TARGET_NR_ppoll_time64
10646     case TARGET_NR_ppoll_time64:
10647         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10648 #endif
10649     case TARGET_NR_flock:
10650         /* NOTE: the flock constant seems to be the same for every
10651            Linux platform */
10652         return get_errno(safe_flock(arg1, arg2));
10653     case TARGET_NR_readv:
10654         {
10655             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10656             if (vec != NULL) {
10657                 ret = get_errno(safe_readv(arg1, vec, arg3));
10658                 unlock_iovec(vec, arg2, arg3, 1);
10659             } else {
10660                 ret = -host_to_target_errno(errno);
10661             }
10662         }
10663         return ret;
10664     case TARGET_NR_writev:
10665         {
10666             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10667             if (vec != NULL) {
10668                 ret = get_errno(safe_writev(arg1, vec, arg3));
10669                 unlock_iovec(vec, arg2, arg3, 0);
10670             } else {
10671                 ret = -host_to_target_errno(errno);
10672             }
10673         }
10674         return ret;
10675 #if defined(TARGET_NR_preadv)
10676     case TARGET_NR_preadv:
10677         {
10678             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10679             if (vec != NULL) {
10680                 unsigned long low, high;
10681 
10682                 target_to_host_low_high(arg4, arg5, &low, &high);
10683                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10684                 unlock_iovec(vec, arg2, arg3, 1);
10685             } else {
10686                 ret = -host_to_target_errno(errno);
10687             }
10688         }
10689         return ret;
10690 #endif
10691 #if defined(TARGET_NR_pwritev)
10692     case TARGET_NR_pwritev:
10693         {
10694             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10695             if (vec != NULL) {
10696                 unsigned long low, high;
10697 
10698                 target_to_host_low_high(arg4, arg5, &low, &high);
10699                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10700                 unlock_iovec(vec, arg2, arg3, 0);
10701             } else {
10702                 ret = -host_to_target_errno(errno);
10703             }
10704         }
10705         return ret;
10706 #endif
10707     case TARGET_NR_getsid:
10708         return get_errno(getsid(arg1));
10709 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10710     case TARGET_NR_fdatasync:
10711         return get_errno(fdatasync(arg1));
10712 #endif
10713     case TARGET_NR_sched_getaffinity:
10714         {
10715             unsigned int mask_size;
10716             unsigned long *mask;
10717 
10718             /*
10719              * sched_getaffinity needs multiples of ulong, so we need to
10720              * handle mismatches between the target and host ulong sizes.
10721              */
10722             if (arg2 & (sizeof(abi_ulong) - 1)) {
10723                 return -TARGET_EINVAL;
10724             }
10725             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10726 
10727             mask = alloca(mask_size);
10728             memset(mask, 0, mask_size);
10729             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10730 
10731             if (!is_error(ret)) {
10732                 if (ret > arg2) {
10733                     /* More data was returned than fits in the caller's buffer.
10734                      * This only happens if sizeof(abi_long) < sizeof(long)
10735                      * and the caller passed us a buffer holding an odd number
10736                      * of abi_longs. If the host kernel is actually using the
10737                      * extra 4 bytes then fail EINVAL; otherwise we can just
10738                      * ignore them and only copy the interesting part.
10739                      */
10740                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10741                     if (numcpus > arg2 * 8) {
10742                         return -TARGET_EINVAL;
10743                     }
10744                     ret = arg2;
10745                 }
10746 
10747                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10748                     return -TARGET_EFAULT;
10749                 }
10750             }
10751         }
10752         return ret;
10753     case TARGET_NR_sched_setaffinity:
10754         {
10755             unsigned int mask_size;
10756             unsigned long *mask;
10757 
10758             /*
10759              * sched_setaffinity needs multiples of ulong, so we need to
10760              * handle mismatches between the target and host ulong sizes.
10761              */
10762             if (arg2 & (sizeof(abi_ulong) - 1)) {
10763                 return -TARGET_EINVAL;
10764             }
10765             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10766             mask = alloca(mask_size);
10767 
10768             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10769             if (ret) {
10770                 return ret;
10771             }
10772 
10773             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10774         }
10775     case TARGET_NR_getcpu:
10776         {
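                  /*
                   * The third getcpu() argument is the obsolete tcache pointer,
                   * which the kernel ignores, so always pass NULL.
                   */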
10777             unsigned cpu, node;
10778             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10779                                        arg2 ? &node : NULL,
10780                                        NULL));
10781             if (is_error(ret)) {
10782                 return ret;
10783             }
10784             if (arg1 && put_user_u32(cpu, arg1)) {
10785                 return -TARGET_EFAULT;
10786             }
10787             if (arg2 && put_user_u32(node, arg2)) {
10788                 return -TARGET_EFAULT;
10789             }
10790         }
10791         return ret;
10792     case TARGET_NR_sched_setparam:
10793         {
10794             struct target_sched_param *target_schp;
10795             struct sched_param schp;
10796 
10797             if (arg2 == 0) {
10798                 return -TARGET_EINVAL;
10799             }
10800             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10801                 return -TARGET_EFAULT;
10802             }
10803             schp.sched_priority = tswap32(target_schp->sched_priority);
10804             unlock_user_struct(target_schp, arg2, 0);
10805             return get_errno(sys_sched_setparam(arg1, &schp));
10806         }
10807     case TARGET_NR_sched_getparam:
10808         {
10809             struct target_sched_param *target_schp;
10810             struct sched_param schp;
10811 
10812             if (arg2 == 0) {
10813                 return -TARGET_EINVAL;
10814             }
10815             ret = get_errno(sys_sched_getparam(arg1, &schp));
10816             if (!is_error(ret)) {
10817                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10818                     return -TARGET_EFAULT;
10819                 }
10820                 target_schp->sched_priority = tswap32(schp.sched_priority);
10821                 unlock_user_struct(target_schp, arg2, 1);
10822             }
10823         }
10824         return ret;
10825     case TARGET_NR_sched_setscheduler:
10826         {
10827             struct target_sched_param *target_schp;
10828             struct sched_param schp;
10829             if (arg3 == 0) {
10830                 return -TARGET_EINVAL;
10831             }
10832             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10833                 return -TARGET_EFAULT;
10834             }
10835             schp.sched_priority = tswap32(target_schp->sched_priority);
10836             unlock_user_struct(target_schp, arg3, 0);
10837             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10838         }
10839     case TARGET_NR_sched_getscheduler:
10840         return get_errno(sys_sched_getscheduler(arg1));
10841     case TARGET_NR_sched_getattr:
10842         {
10843             struct target_sched_attr *target_scha;
10844             struct sched_attr scha;
10845             if (arg2 == 0) {
10846                 return -TARGET_EINVAL;
10847             }
10848             if (arg3 > sizeof(scha)) {
10849                 arg3 = sizeof(scha);
10850             }
10851             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10852             if (!is_error(ret)) {
10853                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10854                 if (!target_scha) {
10855                     return -TARGET_EFAULT;
10856                 }
10857                 target_scha->size = tswap32(scha.size);
10858                 target_scha->sched_policy = tswap32(scha.sched_policy);
10859                 target_scha->sched_flags = tswap64(scha.sched_flags);
10860                 target_scha->sched_nice = tswap32(scha.sched_nice);
10861                 target_scha->sched_priority = tswap32(scha.sched_priority);
10862                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10863                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10864                 target_scha->sched_period = tswap64(scha.sched_period);
10865                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10866                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10867                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10868                 }
10869                 unlock_user(target_scha, arg2, arg3);
10870             }
10871             return ret;
10872         }
10873     case TARGET_NR_sched_setattr:
10874         {
10875             struct target_sched_attr *target_scha;
10876             struct sched_attr scha;
10877             uint32_t size;
10878             int zeroed;
10879             if (arg2 == 0) {
10880                 return -TARGET_EINVAL;
10881             }
10882             if (get_user_u32(size, arg2)) {
10883                 return -TARGET_EFAULT;
10884             }
10885             if (!size) {
10886                 size = offsetof(struct target_sched_attr, sched_util_min);
10887             }
10888             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10889                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10890                     return -TARGET_EFAULT;
10891                 }
10892                 return -TARGET_E2BIG;
10893             }
10894 
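                  /*
                   * As the kernel does, accept a larger-than-known structure
                   * only if all of the extra bytes are zero.
                   */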
10895             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
10896             if (zeroed < 0) {
10897                 return zeroed;
10898             } else if (zeroed == 0) {
10899                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10900                     return -TARGET_EFAULT;
10901                 }
10902                 return -TARGET_E2BIG;
10903             }
10904             if (size > sizeof(struct target_sched_attr)) {
10905                 size = sizeof(struct target_sched_attr);
10906             }
10907 
10908             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10909             if (!target_scha) {
10910                 return -TARGET_EFAULT;
10911             }
10912             scha.size = size;
10913             scha.sched_policy = tswap32(target_scha->sched_policy);
10914             scha.sched_flags = tswap64(target_scha->sched_flags);
10915             scha.sched_nice = tswap32(target_scha->sched_nice);
10916             scha.sched_priority = tswap32(target_scha->sched_priority);
10917             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10918             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10919             scha.sched_period = tswap64(target_scha->sched_period);
10920             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10921                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10922                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10923             }
10924             unlock_user(target_scha, arg2, 0);
10925             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10926         }
10927     case TARGET_NR_sched_yield:
10928         return get_errno(sched_yield());
10929     case TARGET_NR_sched_get_priority_max:
10930         return get_errno(sched_get_priority_max(arg1));
10931     case TARGET_NR_sched_get_priority_min:
10932         return get_errno(sched_get_priority_min(arg1));
10933 #ifdef TARGET_NR_sched_rr_get_interval
10934     case TARGET_NR_sched_rr_get_interval:
10935         {
10936             struct timespec ts;
10937             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10938             if (!is_error(ret)) {
10939                 ret = host_to_target_timespec(arg2, &ts);
10940             }
10941         }
10942         return ret;
10943 #endif
10944 #ifdef TARGET_NR_sched_rr_get_interval_time64
10945     case TARGET_NR_sched_rr_get_interval_time64:
10946         {
10947             struct timespec ts;
10948             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10949             if (!is_error(ret)) {
10950                 ret = host_to_target_timespec64(arg2, &ts);
10951             }
10952         }
10953         return ret;
10954 #endif
10955 #if defined(TARGET_NR_nanosleep)
10956     case TARGET_NR_nanosleep:
10957         {
10958             struct timespec req, rem;
10959             if (target_to_host_timespec(&req, arg1)) {
                      return -TARGET_EFAULT;
                  }
10960             ret = get_errno(safe_nanosleep(&req, &rem));
10961             if (is_error(ret) && arg2) {
10962                 host_to_target_timespec(arg2, &rem);
10963             }
10964         }
10965         return ret;
10966 #endif
10967     case TARGET_NR_prctl:
10968         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10970 #ifdef TARGET_NR_arch_prctl
10971     case TARGET_NR_arch_prctl:
10972         return do_arch_prctl(cpu_env, arg1, arg2);
10973 #endif
10974 #ifdef TARGET_NR_pread64
10975     case TARGET_NR_pread64:
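              /*
               * Targets that pass 64-bit syscall arguments in aligned register
               * pairs insert a padding argument, so the offset pair starts one
               * argument later.
               */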
10976         if (regpairs_aligned(cpu_env, num)) {
10977             arg4 = arg5;
10978             arg5 = arg6;
10979         }
10980         if (arg2 == 0 && arg3 == 0) {
10981             /* Special-case NULL buffer and zero length, which should succeed */
10982             p = 0;
10983         } else {
10984             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10985             if (!p) {
10986                 return -TARGET_EFAULT;
10987             }
10988         }
10989         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10990         unlock_user(p, arg2, ret);
10991         return ret;
10992     case TARGET_NR_pwrite64:
10993         if (regpairs_aligned(cpu_env, num)) {
10994             arg4 = arg5;
10995             arg5 = arg6;
10996         }
10997         if (arg2 == 0 && arg3 == 0) {
10998             /* Special-case NULL buffer and zero length, which should succeed */
10999             p = 0;
11000         } else {
11001             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11002             if (!p) {
11003                 return -TARGET_EFAULT;
11004             }
11005         }
11006         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11007         unlock_user(p, arg2, 0);
11008         return ret;
11009 #endif
11010     case TARGET_NR_getcwd:
11011         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11012             return -TARGET_EFAULT;
11013         ret = get_errno(sys_getcwd1(p, arg2));
11014         unlock_user(p, arg1, ret);
11015         return ret;
11016     case TARGET_NR_capget:
11017     case TARGET_NR_capset:
11018     {
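              /*
               * capget and capset share the conversion of the capability
               * header and data between target and host layouts; version 1
               * of the ABI uses a single data struct, later versions use two.
               */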
11019         struct target_user_cap_header *target_header;
11020         struct target_user_cap_data *target_data = NULL;
11021         struct __user_cap_header_struct header;
11022         struct __user_cap_data_struct data[2];
11023         struct __user_cap_data_struct *dataptr = NULL;
11024         int i, target_datalen;
11025         int data_items = 1;
11026 
11027         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11028             return -TARGET_EFAULT;
11029         }
11030         header.version = tswap32(target_header->version);
11031         header.pid = tswap32(target_header->pid);
11032 
11033         if (header.version != _LINUX_CAPABILITY_VERSION) {
11034             /* Version 2 and up takes pointer to two user_data structs */
11035             data_items = 2;
11036         }
11037 
11038         target_datalen = sizeof(*target_data) * data_items;
11039 
11040         if (arg2) {
11041             if (num == TARGET_NR_capget) {
11042                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11043             } else {
11044                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11045             }
11046             if (!target_data) {
11047                 unlock_user_struct(target_header, arg1, 0);
11048                 return -TARGET_EFAULT;
11049             }
11050 
11051             if (num == TARGET_NR_capset) {
11052                 for (i = 0; i < data_items; i++) {
11053                     data[i].effective = tswap32(target_data[i].effective);
11054                     data[i].permitted = tswap32(target_data[i].permitted);
11055                     data[i].inheritable = tswap32(target_data[i].inheritable);
11056                 }
11057             }
11058 
11059             dataptr = data;
11060         }
11061 
11062         if (num == TARGET_NR_capget) {
11063             ret = get_errno(capget(&header, dataptr));
11064         } else {
11065             ret = get_errno(capset(&header, dataptr));
11066         }
11067 
11068         /* The kernel always updates version for both capget and capset */
11069         target_header->version = tswap32(header.version);
11070         unlock_user_struct(target_header, arg1, 1);
11071 
11072         if (arg2) {
11073             if (num == TARGET_NR_capget) {
11074                 for (i = 0; i < data_items; i++) {
11075                     target_data[i].effective = tswap32(data[i].effective);
11076                     target_data[i].permitted = tswap32(data[i].permitted);
11077                     target_data[i].inheritable = tswap32(data[i].inheritable);
11078                 }
11079                 unlock_user(target_data, arg2, target_datalen);
11080             } else {
11081                 unlock_user(target_data, arg2, 0);
11082             }
11083         }
11084         return ret;
11085     }
11086     case TARGET_NR_sigaltstack:
11087         return do_sigaltstack(arg1, arg2, cpu_env);
11088 
11089 #ifdef CONFIG_SENDFILE
11090 #ifdef TARGET_NR_sendfile
11091     case TARGET_NR_sendfile:
11092     {
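              /*
               * sendfile takes the offset as an abi_long; the sendfile64
               * variant below is identical except for the 64-bit offset.
               */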
11093         off_t *offp = NULL;
11094         off_t off;
11095         if (arg3) {
11096             ret = get_user_sal(off, arg3);
11097             if (is_error(ret)) {
11098                 return ret;
11099             }
11100             offp = &off;
11101         }
11102         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11103         if (!is_error(ret) && arg3) {
11104             abi_long ret2 = put_user_sal(off, arg3);
11105             if (is_error(ret2)) {
11106                 ret = ret2;
11107             }
11108         }
11109         return ret;
11110     }
11111 #endif
11112 #ifdef TARGET_NR_sendfile64
11113     case TARGET_NR_sendfile64:
11114     {
11115         off_t *offp = NULL;
11116         off_t off;
11117         if (arg3) {
11118             ret = get_user_s64(off, arg3);
11119             if (is_error(ret)) {
11120                 return ret;
11121             }
11122             offp = &off;
11123         }
11124         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11125         if (!is_error(ret) && arg3) {
11126             abi_long ret2 = put_user_s64(off, arg3);
11127             if (is_error(ret2)) {
11128                 ret = ret2;
11129             }
11130         }
11131         return ret;
11132     }
11133 #endif
11134 #endif
11135 #ifdef TARGET_NR_vfork
11136     case TARGET_NR_vfork:
11137         return get_errno(do_fork(cpu_env,
11138                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11139                          0, 0, 0, 0));
11140 #endif
11141 #ifdef TARGET_NR_ugetrlimit
11142     case TARGET_NR_ugetrlimit:
11143     {
11144         struct rlimit rlim;
11145         int resource = target_to_host_resource(arg1);
11146         ret = get_errno(getrlimit(resource, &rlim));
11147         if (!is_error(ret)) {
11148             struct target_rlimit *target_rlim;
11149             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11150                 return -TARGET_EFAULT;
11151             target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11152             target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11153             unlock_user_struct(target_rlim, arg2, 1);
11154         }
11155         return ret;
11156     }
11157 #endif
11158 #ifdef TARGET_NR_truncate64
11159     case TARGET_NR_truncate64:
11160         if (!(p = lock_user_string(arg1)))
11161             return -TARGET_EFAULT;
11162         ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11163         unlock_user(p, arg1, 0);
11164         return ret;
11165 #endif
11166 #ifdef TARGET_NR_ftruncate64
11167     case TARGET_NR_ftruncate64:
11168         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11169 #endif
11170 #ifdef TARGET_NR_stat64
11171     case TARGET_NR_stat64:
11172         if (!(p = lock_user_string(arg1))) {
11173             return -TARGET_EFAULT;
11174         }
11175         ret = get_errno(stat(path(p), &st));
11176         unlock_user(p, arg1, 0);
11177         if (!is_error(ret))
11178             ret = host_to_target_stat64(cpu_env, arg2, &st);
11179         return ret;
11180 #endif
11181 #ifdef TARGET_NR_lstat64
11182     case TARGET_NR_lstat64:
11183         if (!(p = lock_user_string(arg1))) {
11184             return -TARGET_EFAULT;
11185         }
11186         ret = get_errno(lstat(path(p), &st));
11187         unlock_user(p, arg1, 0);
11188         if (!is_error(ret))
11189             ret = host_to_target_stat64(cpu_env, arg2, &st);
11190         return ret;
11191 #endif
11192 #ifdef TARGET_NR_fstat64
11193     case TARGET_NR_fstat64:
11194         ret = get_errno(fstat(arg1, &st));
11195         if (!is_error(ret))
11196             ret = host_to_target_stat64(cpu_env, arg2, &st);
11197         return ret;
11198 #endif
11199 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11200 #ifdef TARGET_NR_fstatat64
11201     case TARGET_NR_fstatat64:
11202 #endif
11203 #ifdef TARGET_NR_newfstatat
11204     case TARGET_NR_newfstatat:
11205 #endif
11206         if (!(p = lock_user_string(arg2))) {
11207             return -TARGET_EFAULT;
11208         }
11209         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11210         unlock_user(p, arg2, 0);
11211         if (!is_error(ret))
11212             ret = host_to_target_stat64(cpu_env, arg3, &st);
11213         return ret;
11214 #endif
11215 #if defined(TARGET_NR_statx)
11216     case TARGET_NR_statx:
11217         {
11218             struct target_statx *target_stx;
11219             int dirfd = arg1;
11220             int flags = arg3;
11221 
11222             p = lock_user_string(arg2);
11223             if (p == NULL) {
11224                 return -TARGET_EFAULT;
11225             }
11226 #if defined(__NR_statx)
11227             {
11228                 /*
11229                  * It is assumed that struct statx is architecture independent.
11230                  */
11231                 struct target_statx host_stx;
11232                 int mask = arg4;
11233 
11234                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11235                 if (!is_error(ret)) {
11236                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11237                         unlock_user(p, arg2, 0);
11238                         return -TARGET_EFAULT;
11239                     }
11240                 }
11241 
11242                 if (ret != -TARGET_ENOSYS) {
11243                     unlock_user(p, arg2, 0);
11244                     return ret;
11245                 }
11246             }
11247 #endif
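                  /*
                   * Either the host has no statx() or it returned ENOSYS:
                   * emulate it with fstatat() and fill in the statx fields
                   * that can be recovered from struct stat.
                   */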
11248             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11249             unlock_user(p, arg2, 0);
11250 
11251             if (!is_error(ret)) {
11252                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11253                     return -TARGET_EFAULT;
11254                 }
11255                 memset(target_stx, 0, sizeof(*target_stx));
11256                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11257                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11258                 __put_user(st.st_ino, &target_stx->stx_ino);
11259                 __put_user(st.st_mode, &target_stx->stx_mode);
11260                 __put_user(st.st_uid, &target_stx->stx_uid);
11261                 __put_user(st.st_gid, &target_stx->stx_gid);
11262                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11263                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11264                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11265                 __put_user(st.st_size, &target_stx->stx_size);
11266                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11267                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11268                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11269                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11270                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11271                 unlock_user_struct(target_stx, arg5, 1);
11272             }
11273         }
11274         return ret;
11275 #endif
11276 #ifdef TARGET_NR_lchown
11277     case TARGET_NR_lchown:
11278         if (!(p = lock_user_string(arg1)))
11279             return -TARGET_EFAULT;
11280         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11281         unlock_user(p, arg1, 0);
11282         return ret;
11283 #endif
11284 #ifdef TARGET_NR_getuid
11285     case TARGET_NR_getuid:
11286         return get_errno(high2lowuid(getuid()));
11287 #endif
11288 #ifdef TARGET_NR_getgid
11289     case TARGET_NR_getgid:
11290         return get_errno(high2lowgid(getgid()));
11291 #endif
11292 #ifdef TARGET_NR_geteuid
11293     case TARGET_NR_geteuid:
11294         return get_errno(high2lowuid(geteuid()));
11295 #endif
11296 #ifdef TARGET_NR_getegid
11297     case TARGET_NR_getegid:
11298         return get_errno(high2lowgid(getegid()));
11299 #endif
11300     case TARGET_NR_setreuid:
11301         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11302     case TARGET_NR_setregid:
11303         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11304     case TARGET_NR_getgroups:
11305         {
11306             int gidsetsize = arg1;
11307             target_id *target_grouplist;
11308             gid_t *grouplist;
11309             int i;
11310 
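                  /*
                   * A gidsetsize of 0 only queries the number of supplementary
                   * groups, so nothing is copied back to the target buffer.
                   */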
11311             grouplist = alloca(gidsetsize * sizeof(gid_t));
11312             ret = get_errno(getgroups(gidsetsize, grouplist));
11313             if (gidsetsize == 0)
11314                 return ret;
11315             if (!is_error(ret)) {
11316                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11317                 if (!target_grouplist)
11318                     return -TARGET_EFAULT;
11319                 for (i = 0; i < ret; i++)
11320                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11321                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11322             }
11323         }
11324         return ret;
11325     case TARGET_NR_setgroups:
11326         {
11327             int gidsetsize = arg1;
11328             target_id *target_grouplist;
11329             gid_t *grouplist = NULL;
11330             int i;
11331             if (gidsetsize) {
11332                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11333                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11334                 if (!target_grouplist) {
11335                     return -TARGET_EFAULT;
11336                 }
11337                 for (i = 0; i < gidsetsize; i++) {
11338                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11339                 }
11340                 unlock_user(target_grouplist, arg2, 0);
11341             }
11342             return get_errno(setgroups(gidsetsize, grouplist));
11343         }
11344     case TARGET_NR_fchown:
11345         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11346 #if defined(TARGET_NR_fchownat)
11347     case TARGET_NR_fchownat:
11348         if (!(p = lock_user_string(arg2)))
11349             return -TARGET_EFAULT;
11350         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11351                                  low2highgid(arg4), arg5));
11352         unlock_user(p, arg2, 0);
11353         return ret;
11354 #endif
11355 #ifdef TARGET_NR_setresuid
11356     case TARGET_NR_setresuid:
11357         return get_errno(sys_setresuid(low2highuid(arg1),
11358                                        low2highuid(arg2),
11359                                        low2highuid(arg3)));
11360 #endif
11361 #ifdef TARGET_NR_getresuid
11362     case TARGET_NR_getresuid:
11363         {
11364             uid_t ruid, euid, suid;
11365             ret = get_errno(getresuid(&ruid, &euid, &suid));
11366             if (!is_error(ret)) {
11367                 if (put_user_id(high2lowuid(ruid), arg1)
11368                     || put_user_id(high2lowuid(euid), arg2)
11369                     || put_user_id(high2lowuid(suid), arg3))
11370                     return -TARGET_EFAULT;
11371             }
11372         }
11373         return ret;
11374 #endif
11375 #ifdef TARGET_NR_getresgid
11376     case TARGET_NR_setresgid:
11377         return get_errno(sys_setresgid(low2highgid(arg1),
11378                                        low2highgid(arg2),
11379                                        low2highgid(arg3)));
11380 #endif
11381 #ifdef TARGET_NR_getresgid
11382     case TARGET_NR_getresgid:
11383         {
11384             gid_t rgid, egid, sgid;
11385             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11386             if (!is_error(ret)) {
11387                 if (put_user_id(high2lowgid(rgid), arg1)
11388                     || put_user_id(high2lowgid(egid), arg2)
11389                     || put_user_id(high2lowgid(sgid), arg3))
11390                     return -TARGET_EFAULT;
11391             }
11392         }
11393         return ret;
11394 #endif
11395 #ifdef TARGET_NR_chown
11396     case TARGET_NR_chown:
11397         if (!(p = lock_user_string(arg1)))
11398             return -TARGET_EFAULT;
11399         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11400         unlock_user(p, arg1, 0);
11401         return ret;
11402 #endif
11403     case TARGET_NR_setuid:
11404         return get_errno(sys_setuid(low2highuid(arg1)));
11405     case TARGET_NR_setgid:
11406         return get_errno(sys_setgid(low2highgid(arg1)));
11407     case TARGET_NR_setfsuid:
11408         return get_errno(setfsuid(arg1));
11409     case TARGET_NR_setfsgid:
11410         return get_errno(setfsgid(arg1));
11411 
11412 #ifdef TARGET_NR_lchown32
11413     case TARGET_NR_lchown32:
11414         if (!(p = lock_user_string(arg1)))
11415             return -TARGET_EFAULT;
11416         ret = get_errno(lchown(p, arg2, arg3));
11417         unlock_user(p, arg1, 0);
11418         return ret;
11419 #endif
11420 #ifdef TARGET_NR_getuid32
11421     case TARGET_NR_getuid32:
11422         return get_errno(getuid());
11423 #endif
11424 
11425 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
11426    /* Alpha specific */
11427     case TARGET_NR_getxuid:
11428          {
11429             uid_t euid;
11430             euid = geteuid();
11431             cpu_env->ir[IR_A4] = euid;
11432          }
11433         return get_errno(getuid());
11434 #endif
11435 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
11436    /* Alpha specific */
11437     case TARGET_NR_getxgid:
11438          {
11439             gid_t egid;
11440             egid = getegid();
11441             cpu_env->ir[IR_A4] = egid;
11442          }
11443         return get_errno(getgid());
11444 #endif
11445 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11446     /* Alpha specific */
11447     case TARGET_NR_osf_getsysinfo:
11448         ret = -TARGET_EOPNOTSUPP;
11449         switch (arg1) {
11450           case TARGET_GSI_IEEE_FP_CONTROL:
11451             {
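                      /*
                       * Return the software FP control word with the current
                       * exception status bits merged in from the hardware fpcr.
                       */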
11452                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11453                 uint64_t swcr = cpu_env->swcr;
11454 
11455                 swcr &= ~SWCR_STATUS_MASK;
11456                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11457 
11458                 if (put_user_u64 (swcr, arg2))
11459                         return -TARGET_EFAULT;
11460                 ret = 0;
11461             }
11462             break;
11463 
11464           /* case GSI_IEEE_STATE_AT_SIGNAL:
11465              -- Not implemented in linux kernel.
11466              case GSI_UACPROC:
11467              -- Retrieves current unaligned access state; not much used.
11468              case GSI_PROC_TYPE:
11469              -- Retrieves implver information; surely not used.
11470              case GSI_GET_HWRPB:
11471              -- Grabs a copy of the HWRPB; surely not used.
11472           */
11473         }
11474         return ret;
11475 #endif
11476 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11477     /* Alpha specific */
11478     case TARGET_NR_osf_setsysinfo:
11479         ret = -TARGET_EOPNOTSUPP;
11480         switch (arg1) {
11481           case TARGET_SSI_IEEE_FP_CONTROL:
11482             {
11483                 uint64_t swcr, fpcr;
11484 
11485                 if (get_user_u64 (swcr, arg2)) {
11486                     return -TARGET_EFAULT;
11487                 }
11488 
11489                 /*
11490                  * The kernel calls swcr_update_status to update the
11491                  * status bits from the fpcr at every point that it
11492                  * could be queried.  Therefore, we store the status
11493                  * bits only in FPCR.
11494                  */
11495                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11496 
11497                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11498                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11499                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11500                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11501                 ret = 0;
11502             }
11503             break;
11504 
11505           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11506             {
11507                 uint64_t exc, fpcr, fex;
11508 
11509                 if (get_user_u64(exc, arg2)) {
11510                     return -TARGET_EFAULT;
11511                 }
11512                 exc &= SWCR_STATUS_MASK;
11513                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11514 
11515                 /* Old exceptions are not signaled.  */
11516                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11517                 fex = exc & ~fex;
11518                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11519                 fex &= (cpu_env)->swcr;
11520 
11521                 /* Update the hardware fpcr.  */
11522                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11523                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11524 
11525                 if (fex) {
11526                     int si_code = TARGET_FPE_FLTUNK;
11527                     target_siginfo_t info;
11528 
11529                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11530                         si_code = TARGET_FPE_FLTUND;
11531                     }
11532                     if (fex & SWCR_TRAP_ENABLE_INE) {
11533                         si_code = TARGET_FPE_FLTRES;
11534                     }
11535                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11536                         si_code = TARGET_FPE_FLTUND;
11537                     }
11538                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11539                         si_code = TARGET_FPE_FLTOVF;
11540                     }
11541                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11542                         si_code = TARGET_FPE_FLTDIV;
11543                     }
11544                     if (fex & SWCR_TRAP_ENABLE_INV) {
11545                         si_code = TARGET_FPE_FLTINV;
11546                     }
11547 
11548                     info.si_signo = SIGFPE;
11549                     info.si_errno = 0;
11550                     info.si_code = si_code;
11551                     info._sifields._sigfault._addr = (cpu_env)->pc;
11552                     queue_signal(cpu_env, info.si_signo,
11553                                  QEMU_SI_FAULT, &info);
11554                 }
11555                 ret = 0;
11556             }
11557             break;
11558 
11559           /* case SSI_NVPAIRS:
11560              -- Used with SSIN_UACPROC to enable unaligned accesses.
11561              case SSI_IEEE_STATE_AT_SIGNAL:
11562              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11563              -- Not implemented in linux kernel
11564           */
11565         }
11566         return ret;
11567 #endif
11568 #ifdef TARGET_NR_osf_sigprocmask
11569     /* Alpha specific.  */
11570     case TARGET_NR_osf_sigprocmask:
11571         {
11572             abi_ulong mask;
11573             int how;
11574             sigset_t set, oldset;
11575 
11576             switch(arg1) {
11577             case TARGET_SIG_BLOCK:
11578                 how = SIG_BLOCK;
11579                 break;
11580             case TARGET_SIG_UNBLOCK:
11581                 how = SIG_UNBLOCK;
11582                 break;
11583             case TARGET_SIG_SETMASK:
11584                 how = SIG_SETMASK;
11585                 break;
11586             default:
11587                 return -TARGET_EINVAL;
11588             }
11589             mask = arg2;
11590             target_to_host_old_sigset(&set, &mask);
11591             ret = do_sigprocmask(how, &set, &oldset);
11592             if (!ret) {
11593                 host_to_target_old_sigset(&mask, &oldset);
11594                 ret = mask;
11595             }
11596         }
11597         return ret;
11598 #endif
11599 
11600 #ifdef TARGET_NR_getgid32
11601     case TARGET_NR_getgid32:
11602         return get_errno(getgid());
11603 #endif
11604 #ifdef TARGET_NR_geteuid32
11605     case TARGET_NR_geteuid32:
11606         return get_errno(geteuid());
11607 #endif
11608 #ifdef TARGET_NR_getegid32
11609     case TARGET_NR_getegid32:
11610         return get_errno(getegid());
11611 #endif
11612 #ifdef TARGET_NR_setreuid32
11613     case TARGET_NR_setreuid32:
11614         return get_errno(setreuid(arg1, arg2));
11615 #endif
11616 #ifdef TARGET_NR_setregid32
11617     case TARGET_NR_setregid32:
11618         return get_errno(setregid(arg1, arg2));
11619 #endif
11620 #ifdef TARGET_NR_getgroups32
11621     case TARGET_NR_getgroups32:
11622         {
11623             int gidsetsize = arg1;
11624             uint32_t *target_grouplist;
11625             gid_t *grouplist;
11626             int i;
11627 
11628             grouplist = alloca(gidsetsize * sizeof(gid_t));
11629             ret = get_errno(getgroups(gidsetsize, grouplist));
11630             if (gidsetsize == 0)
11631                 return ret;
11632             if (!is_error(ret)) {
11633                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11634                 if (!target_grouplist) {
11635                     return -TARGET_EFAULT;
11636                 }
11637                 for (i = 0; i < ret; i++)
11638                     target_grouplist[i] = tswap32(grouplist[i]);
11639                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11640             }
11641         }
11642         return ret;
11643 #endif
11644 #ifdef TARGET_NR_setgroups32
11645     case TARGET_NR_setgroups32:
11646         {
11647             int gidsetsize = arg1;
11648             uint32_t *target_grouplist;
11649             gid_t *grouplist;
11650             int i;
11651 
11652             grouplist = alloca(gidsetsize * sizeof(gid_t));
11653             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11654             if (!target_grouplist) {
11655                 return -TARGET_EFAULT;
11656             }
11657             for (i = 0; i < gidsetsize; i++)
11658                 grouplist[i] = tswap32(target_grouplist[i]);
11659             unlock_user(target_grouplist, arg2, 0);
11660             return get_errno(setgroups(gidsetsize, grouplist));
11661         }
11662 #endif
11663 #ifdef TARGET_NR_fchown32
11664     case TARGET_NR_fchown32:
11665         return get_errno(fchown(arg1, arg2, arg3));
11666 #endif
11667 #ifdef TARGET_NR_setresuid32
11668     case TARGET_NR_setresuid32:
11669         return get_errno(sys_setresuid(arg1, arg2, arg3));
11670 #endif
11671 #ifdef TARGET_NR_getresuid32
11672     case TARGET_NR_getresuid32:
11673         {
11674             uid_t ruid, euid, suid;
11675             ret = get_errno(getresuid(&ruid, &euid, &suid));
11676             if (!is_error(ret)) {
11677                 if (put_user_u32(ruid, arg1)
11678                     || put_user_u32(euid, arg2)
11679                     || put_user_u32(suid, arg3))
11680                     return -TARGET_EFAULT;
11681             }
11682         }
11683         return ret;
11684 #endif
11685 #ifdef TARGET_NR_setresgid32
11686     case TARGET_NR_setresgid32:
11687         return get_errno(sys_setresgid(arg1, arg2, arg3));
11688 #endif
11689 #ifdef TARGET_NR_getresgid32
11690     case TARGET_NR_getresgid32:
11691         {
11692             gid_t rgid, egid, sgid;
11693             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11694             if (!is_error(ret)) {
11695                 if (put_user_u32(rgid, arg1)
11696                     || put_user_u32(egid, arg2)
11697                     || put_user_u32(sgid, arg3))
11698                     return -TARGET_EFAULT;
11699             }
11700         }
11701         return ret;
11702 #endif
11703 #ifdef TARGET_NR_chown32
11704     case TARGET_NR_chown32:
11705         if (!(p = lock_user_string(arg1)))
11706             return -TARGET_EFAULT;
11707         ret = get_errno(chown(p, arg2, arg3));
11708         unlock_user(p, arg1, 0);
11709         return ret;
11710 #endif
11711 #ifdef TARGET_NR_setuid32
11712     case TARGET_NR_setuid32:
11713         return get_errno(sys_setuid(arg1));
11714 #endif
11715 #ifdef TARGET_NR_setgid32
11716     case TARGET_NR_setgid32:
11717         return get_errno(sys_setgid(arg1));
11718 #endif
11719 #ifdef TARGET_NR_setfsuid32
11720     case TARGET_NR_setfsuid32:
11721         return get_errno(setfsuid(arg1));
11722 #endif
11723 #ifdef TARGET_NR_setfsgid32
11724     case TARGET_NR_setfsgid32:
11725         return get_errno(setfsgid(arg1));
11726 #endif
11727 #ifdef TARGET_NR_mincore
11728     case TARGET_NR_mincore:
11729         {
11730             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11731             if (!a) {
11732                 return -TARGET_ENOMEM;
11733             }
11734             p = lock_user_string(arg3);
11735             if (!p) {
11736                 ret = -TARGET_EFAULT;
11737             } else {
11738                 ret = get_errno(mincore(a, arg2, p));
11739                 unlock_user(p, arg3, ret);
11740             }
11741             unlock_user(a, arg1, 0);
11742         }
11743         return ret;
11744 #endif
11745 #ifdef TARGET_NR_arm_fadvise64_64
11746     case TARGET_NR_arm_fadvise64_64:
11747         /* arm_fadvise64_64 looks like fadvise64_64 but
11748          * with different argument order: fd, advice, offset, len
11749          * rather than the usual fd, offset, len, advice.
11750          * Note that offset and len are both 64-bit so appear as
11751          * pairs of 32-bit registers.
11752          */
11753         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11754                             target_offset64(arg5, arg6), arg2);
11755         return -host_to_target_errno(ret);
11756 #endif
11757 
11758 #if TARGET_ABI_BITS == 32
11759 
11760 #ifdef TARGET_NR_fadvise64_64
11761     case TARGET_NR_fadvise64_64:
11762 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11763         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11764         ret = arg2;
11765         arg2 = arg3;
11766         arg3 = arg4;
11767         arg4 = arg5;
11768         arg5 = arg6;
11769         arg6 = ret;
11770 #else
11771         /* 6 args: fd, offset (high, low), len (high, low), advice */
11772         if (regpairs_aligned(cpu_env, num)) {
11773             /* offset is in (3,4), len in (5,6) and advice in 7 */
11774             arg2 = arg3;
11775             arg3 = arg4;
11776             arg4 = arg5;
11777             arg5 = arg6;
11778             arg6 = arg7;
11779         }
11780 #endif
11781         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11782                             target_offset64(arg4, arg5), arg6);
11783         return -host_to_target_errno(ret);
11784 #endif
11785 
11786 #ifdef TARGET_NR_fadvise64
11787     case TARGET_NR_fadvise64:
11788         /* 5 args: fd, offset (high, low), len, advice */
11789         if (regpairs_aligned(cpu_env, num)) {
11790             /* offset is in (3,4), len in 5 and advice in 6 */
11791             arg2 = arg3;
11792             arg3 = arg4;
11793             arg4 = arg5;
11794             arg5 = arg6;
11795         }
11796         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11797         return -host_to_target_errno(ret);
11798 #endif
11799 
11800 #else /* not a 32-bit ABI */
11801 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11802 #ifdef TARGET_NR_fadvise64_64
11803     case TARGET_NR_fadvise64_64:
11804 #endif
11805 #ifdef TARGET_NR_fadvise64
11806     case TARGET_NR_fadvise64:
11807 #endif
11808 #ifdef TARGET_S390X
11809         switch (arg4) {
11810         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11811         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11812         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11813         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11814         default: break;
11815         }
11816 #endif
11817         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11818 #endif
11819 #endif /* end of 64-bit ABI fadvise handling */
11820 
11821 #ifdef TARGET_NR_madvise
11822     case TARGET_NR_madvise:
11823         return target_madvise(arg1, arg2, arg3);
11824 #endif
11825 #ifdef TARGET_NR_fcntl64
11826     case TARGET_NR_fcntl64:
11827     {
11828         int cmd;
11829         struct flock64 fl;
11830         from_flock64_fn *copyfrom = copy_from_user_flock64;
11831         to_flock64_fn *copyto = copy_to_user_flock64;
11832 
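              /*
               * Old-ABI Arm binaries use a differently padded struct flock64
               * layout, so they need their own copy helpers.
               */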
11833 #ifdef TARGET_ARM
11834         if (!cpu_env->eabi) {
11835             copyfrom = copy_from_user_oabi_flock64;
11836             copyto = copy_to_user_oabi_flock64;
11837         }
11838 #endif
11839 
11840         cmd = target_to_host_fcntl_cmd(arg2);
11841         if (cmd == -TARGET_EINVAL) {
11842             return cmd;
11843         }
11844 
11845         switch(arg2) {
11846         case TARGET_F_GETLK64:
11847             ret = copyfrom(&fl, arg3);
11848             if (ret) {
11849                 break;
11850             }
11851             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11852             if (ret == 0) {
11853                 ret = copyto(arg3, &fl);
11854             }
11855             break;
11856 
11857         case TARGET_F_SETLK64:
11858         case TARGET_F_SETLKW64:
11859             ret = copyfrom(&fl, arg3);
11860             if (ret) {
11861                 break;
11862             }
11863             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11864             break;
11865         default:
11866             ret = do_fcntl(arg1, arg2, arg3);
11867             break;
11868         }
11869         return ret;
11870     }
11871 #endif
11872 #ifdef TARGET_NR_cacheflush
11873     case TARGET_NR_cacheflush:
11874         /* self-modifying code is handled automatically, so nothing needed */
11875         return 0;
11876 #endif
11877 #ifdef TARGET_NR_getpagesize
11878     case TARGET_NR_getpagesize:
11879         return TARGET_PAGE_SIZE;
11880 #endif
11881     case TARGET_NR_gettid:
11882         return get_errno(sys_gettid());
11883 #ifdef TARGET_NR_readahead
11884     case TARGET_NR_readahead:
11885 #if TARGET_ABI_BITS == 32
11886         if (regpairs_aligned(cpu_env, num)) {
11887             arg2 = arg3;
11888             arg3 = arg4;
11889             arg4 = arg5;
11890         }
11891         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11892 #else
11893         ret = get_errno(readahead(arg1, arg2, arg3));
11894 #endif
11895         return ret;
11896 #endif
11897 #ifdef CONFIG_ATTR
11898 #ifdef TARGET_NR_setxattr
11899     case TARGET_NR_listxattr:
11900     case TARGET_NR_llistxattr:
11901     {
11902         void *p, *b = 0;
11903         if (arg2) {
11904             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11905             if (!b) {
11906                 return -TARGET_EFAULT;
11907             }
11908         }
11909         p = lock_user_string(arg1);
11910         if (p) {
11911             if (num == TARGET_NR_listxattr) {
11912                 ret = get_errno(listxattr(p, b, arg3));
11913             } else {
11914                 ret = get_errno(llistxattr(p, b, arg3));
11915             }
11916         } else {
11917             ret = -TARGET_EFAULT;
11918         }
11919         unlock_user(p, arg1, 0);
11920         unlock_user(b, arg2, arg3);
11921         return ret;
11922     }
11923     case TARGET_NR_flistxattr:
11924     {
11925         void *b = 0;
11926         if (arg2) {
11927             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11928             if (!b) {
11929                 return -TARGET_EFAULT;
11930             }
11931         }
11932         ret = get_errno(flistxattr(arg1, b, arg3));
11933         unlock_user(b, arg2, arg3);
11934         return ret;
11935     }
11936     case TARGET_NR_setxattr:
11937     case TARGET_NR_lsetxattr:
11938         {
11939             void *p, *n, *v = 0;
11940             if (arg3) {
11941                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11942                 if (!v) {
11943                     return -TARGET_EFAULT;
11944                 }
11945             }
11946             p = lock_user_string(arg1);
11947             n = lock_user_string(arg2);
11948             if (p && n) {
11949                 if (num == TARGET_NR_setxattr) {
11950                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11951                 } else {
11952                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11953                 }
11954             } else {
11955                 ret = -TARGET_EFAULT;
11956             }
11957             unlock_user(p, arg1, 0);
11958             unlock_user(n, arg2, 0);
11959             unlock_user(v, arg3, 0);
11960         }
11961         return ret;
11962     case TARGET_NR_fsetxattr:
11963         {
11964             void *n, *v = 0;
11965             if (arg3) {
11966                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11967                 if (!v) {
11968                     return -TARGET_EFAULT;
11969                 }
11970             }
11971             n = lock_user_string(arg2);
11972             if (n) {
11973                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11974             } else {
11975                 ret = -TARGET_EFAULT;
11976             }
11977             unlock_user(n, arg2, 0);
11978             unlock_user(v, arg3, 0);
11979         }
11980         return ret;
11981     case TARGET_NR_getxattr:
11982     case TARGET_NR_lgetxattr:
11983         {
11984             void *p, *n, *v = 0;
11985             if (arg3) {
11986                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11987                 if (!v) {
11988                     return -TARGET_EFAULT;
11989                 }
11990             }
11991             p = lock_user_string(arg1);
11992             n = lock_user_string(arg2);
11993             if (p && n) {
11994                 if (num == TARGET_NR_getxattr) {
11995                     ret = get_errno(getxattr(p, n, v, arg4));
11996                 } else {
11997                     ret = get_errno(lgetxattr(p, n, v, arg4));
11998                 }
11999             } else {
12000                 ret = -TARGET_EFAULT;
12001             }
12002             unlock_user(p, arg1, 0);
12003             unlock_user(n, arg2, 0);
12004             unlock_user(v, arg3, arg4);
12005         }
12006         return ret;
12007     case TARGET_NR_fgetxattr:
12008         {
12009             void *n, *v = 0;
12010             if (arg3) {
12011                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12012                 if (!v) {
12013                     return -TARGET_EFAULT;
12014                 }
12015             }
12016             n = lock_user_string(arg2);
12017             if (n) {
12018                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12019             } else {
12020                 ret = -TARGET_EFAULT;
12021             }
12022             unlock_user(n, arg2, 0);
12023             unlock_user(v, arg3, arg4);
12024         }
12025         return ret;
12026     case TARGET_NR_removexattr:
12027     case TARGET_NR_lremovexattr:
12028         {
12029             void *p, *n;
12030             p = lock_user_string(arg1);
12031             n = lock_user_string(arg2);
12032             if (p && n) {
12033                 if (num == TARGET_NR_removexattr) {
12034                     ret = get_errno(removexattr(p, n));
12035                 } else {
12036                     ret = get_errno(lremovexattr(p, n));
12037                 }
12038             } else {
12039                 ret = -TARGET_EFAULT;
12040             }
12041             unlock_user(p, arg1, 0);
12042             unlock_user(n, arg2, 0);
12043         }
12044         return ret;
12045     case TARGET_NR_fremovexattr:
12046         {
12047             void *n;
12048             n = lock_user_string(arg2);
12049             if (n) {
12050                 ret = get_errno(fremovexattr(arg1, n));
12051             } else {
12052                 ret = -TARGET_EFAULT;
12053             }
12054             unlock_user(n, arg2, 0);
12055         }
12056         return ret;
12057 #endif
12058 #endif /* CONFIG_ATTR */
12059 #ifdef TARGET_NR_set_thread_area
12060     case TARGET_NR_set_thread_area:
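              /*
               * Each target stores the thread-local-storage pointer in its own
               * place (a CPU register, a GDT entry, or the TaskState), so the
               * handling here is entirely target-specific.
               */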
12061 #if defined(TARGET_MIPS)
12062       cpu_env->active_tc.CP0_UserLocal = arg1;
12063       return 0;
12064 #elif defined(TARGET_CRIS)
12065       if (arg1 & 0xff)
12066           ret = -TARGET_EINVAL;
12067       else {
12068           cpu_env->pregs[PR_PID] = arg1;
12069           ret = 0;
12070       }
12071       return ret;
12072 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12073       return do_set_thread_area(cpu_env, arg1);
12074 #elif defined(TARGET_M68K)
12075       {
12076           TaskState *ts = cpu->opaque;
12077           ts->tp_value = arg1;
12078           return 0;
12079       }
12080 #else
12081       return -TARGET_ENOSYS;
12082 #endif
12083 #endif
12084 #ifdef TARGET_NR_get_thread_area
12085     case TARGET_NR_get_thread_area:
12086 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12087         return do_get_thread_area(cpu_env, arg1);
12088 #elif defined(TARGET_M68K)
12089         {
12090             TaskState *ts = cpu->opaque;
12091             return ts->tp_value;
12092         }
12093 #else
12094         return -TARGET_ENOSYS;
12095 #endif
12096 #endif
12097 #ifdef TARGET_NR_getdomainname
12098     case TARGET_NR_getdomainname:
12099         return -TARGET_ENOSYS;
12100 #endif
12101 
12102 #ifdef TARGET_NR_clock_settime
12103     case TARGET_NR_clock_settime:
12104     {
12105         struct timespec ts;
12106 
12107         ret = target_to_host_timespec(&ts, arg2);
12108         if (!is_error(ret)) {
12109             ret = get_errno(clock_settime(arg1, &ts));
12110         }
12111         return ret;
12112     }
12113 #endif
12114 #ifdef TARGET_NR_clock_settime64
12115     case TARGET_NR_clock_settime64:
12116     {
12117         struct timespec ts;
12118 
12119         ret = target_to_host_timespec64(&ts, arg2);
12120         if (!is_error(ret)) {
12121             ret = get_errno(clock_settime(arg1, &ts));
12122         }
12123         return ret;
12124     }
12125 #endif
12126 #ifdef TARGET_NR_clock_gettime
12127     case TARGET_NR_clock_gettime:
12128     {
12129         struct timespec ts;
12130         ret = get_errno(clock_gettime(arg1, &ts));
12131         if (!is_error(ret)) {
12132             ret = host_to_target_timespec(arg2, &ts);
12133         }
12134         return ret;
12135     }
12136 #endif
12137 #ifdef TARGET_NR_clock_gettime64
12138     case TARGET_NR_clock_gettime64:
12139     {
12140         struct timespec ts;
12141         ret = get_errno(clock_gettime(arg1, &ts));
12142         if (!is_error(ret)) {
12143             ret = host_to_target_timespec64(arg2, &ts);
12144         }
12145         return ret;
12146     }
12147 #endif
12148 #ifdef TARGET_NR_clock_getres
12149     case TARGET_NR_clock_getres:
12150     {
12151         struct timespec ts;
12152         ret = get_errno(clock_getres(arg1, &ts));
12153         if (!is_error(ret)) {
12154             host_to_target_timespec(arg2, &ts);
12155         }
12156         return ret;
12157     }
12158 #endif
12159 #ifdef TARGET_NR_clock_getres_time64
12160     case TARGET_NR_clock_getres_time64:
12161     {
12162         struct timespec ts;
12163         ret = get_errno(clock_getres(arg1, &ts));
12164         if (!is_error(ret)) {
12165             host_to_target_timespec64(arg2, &ts);
12166         }
12167         return ret;
12168     }
12169 #endif
12170 #ifdef TARGET_NR_clock_nanosleep
12171     case TARGET_NR_clock_nanosleep:
12172     {
12173         struct timespec ts;
12174         if (target_to_host_timespec(&ts, arg3)) {
12175             return -TARGET_EFAULT;
12176         }
12177         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12178                                              &ts, arg4 ? &ts : NULL));
12179         /*
12180          * If the call is interrupted by a signal handler, it fails with
12181          * -TARGET_EINTR; if arg4 is not NULL and arg2 is not TIMER_ABSTIME,
12182          * the remaining unslept time is returned in arg4.
12183          */
12184         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12185             host_to_target_timespec(arg4, &ts)) {
12186               return -TARGET_EFAULT;
12187         }
12188 
12189         return ret;
12190     }
12191 #endif
12192 #ifdef TARGET_NR_clock_nanosleep_time64
12193     case TARGET_NR_clock_nanosleep_time64:
12194     {
12195         struct timespec ts;
12196 
12197         if (target_to_host_timespec64(&ts, arg3)) {
12198             return -TARGET_EFAULT;
12199         }
12200 
12201         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12202                                              &ts, arg4 ? &ts : NULL));
12203 
12204         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12205             host_to_target_timespec64(arg4, &ts)) {
12206             return -TARGET_EFAULT;
12207         }
12208         return ret;
12209     }
12210 #endif
12211 
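          /*
           * The *_time64 variants above exist so that 32-bit targets can use a
           * 64-bit time_t; they differ from the base syscalls only in the
           * guest-side layout (target__kernel_timespec vs. target_timespec),
           * which is why each pair of handlers is otherwise identical.
           */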
12212 #if defined(TARGET_NR_set_tid_address)
12213     case TARGET_NR_set_tid_address:
12214     {
12215         TaskState *ts = cpu->opaque;
12216         ts->child_tidptr = arg1;
12217         /* Do not call the host set_tid_address() syscall; just return the thread id. */
12218         return get_errno(sys_gettid());
12219     }
12220 #endif
12221 
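          /*
           * child_tidptr recorded above is the CLONE_CHILD_CLEARTID address:
           * when the guest thread exits, QEMU's thread-exit handling clears
           * the word at that guest address and futex-wakes any waiters,
           * mirroring what the kernel does for a native set_tid_address().
           */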
12222     case TARGET_NR_tkill:
12223         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12224 
12225     case TARGET_NR_tgkill:
12226         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12227                          target_to_host_signal(arg3)));
12228 
12229 #ifdef TARGET_NR_set_robust_list
12230     case TARGET_NR_set_robust_list:
12231     case TARGET_NR_get_robust_list:
12232         /* The ABI for supporting robust futexes has userspace pass
12233          * the kernel a pointer to a linked list which is updated by
12234          * userspace after the syscall; the list is walked by the kernel
12235          * when the thread exits. Since the linked list in QEMU guest
12236          * memory isn't a valid linked list for the host and we have
12237          * no way to reliably intercept the thread-death event, we can't
12238          * support these. Silently return ENOSYS so that guest userspace
12239          * falls back to a non-robust futex implementation (which should
12240          * be OK except in the corner case of the guest crashing while
12241          * holding a mutex that is shared with another process via
12242          * shared memory).
12243          */
12244         return -TARGET_ENOSYS;
12245 #endif
12246 
12247 #if defined(TARGET_NR_utimensat)
12248     case TARGET_NR_utimensat:
12249         {
12250             struct timespec *tsp, ts[2];
12251             if (!arg3) {
12252                 tsp = NULL;
12253             } else {
12254                 if (target_to_host_timespec(ts, arg3)) {
12255                     return -TARGET_EFAULT;
12256                 }
12257                 if (target_to_host_timespec(ts + 1, arg3 +
12258                                             sizeof(struct target_timespec))) {
12259                     return -TARGET_EFAULT;
12260                 }
12261                 tsp = ts;
12262             }
12263             if (!arg2) {
12264                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12265             } else {
12266                 if (!(p = lock_user_string(arg2))) {
12267                     return -TARGET_EFAULT;
12268                 }
12269                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12270                 unlock_user(p, arg2, 0);
12271             }
12272         }
12273         return ret;
12274 #endif
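          /*
           * Illustrative guest-side call that reaches TARGET_NR_utimensat
           * (sketch only): arg1 = dirfd, arg2 = pathname, arg3 = times[2],
           * arg4 = flags.
           *
           *   struct timespec times[2] = {
           *       { .tv_nsec = UTIME_NOW  },   // set atime to "now"
           *       { .tv_nsec = UTIME_OMIT },   // leave mtime unchanged
           *   };
           *   utimensat(AT_FDCWD, "file", times, 0);
           *
           * A NULL pathname (arg2 == 0) is forwarded as-is because the kernel
           * accepts it to mean "operate on dirfd itself" (the futimens() case).
           */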
12275 #ifdef TARGET_NR_utimensat_time64
12276     case TARGET_NR_utimensat_time64:
12277         {
12278             struct timespec *tsp, ts[2];
12279             if (!arg3) {
12280                 tsp = NULL;
12281             } else {
12282                 if (target_to_host_timespec64(ts, arg3)) {
12283                     return -TARGET_EFAULT;
12284                 }
12285                 if (target_to_host_timespec64(ts + 1, arg3 +
12286                                      sizeof(struct target__kernel_timespec))) {
12287                     return -TARGET_EFAULT;
12288                 }
12289                 tsp = ts;
12290             }
12291             if (!arg2) {
12292                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12293             } else {
12294                 p = lock_user_string(arg2);
12295                 if (!p) {
12296                     return -TARGET_EFAULT;
12297                 }
12298                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12299                 unlock_user(p, arg2, 0);
12300             }
12301         }
12302         return ret;
12303 #endif
12304 #ifdef TARGET_NR_futex
12305     case TARGET_NR_futex:
12306         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12307 #endif
12308 #ifdef TARGET_NR_futex_time64
12309     case TARGET_NR_futex_time64:
12310         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12311 #endif
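          /*
           * do_futex()/do_futex_time64() implement the subset of futex
           * operations that can be forwarded under emulation (WAIT, WAKE,
           * REQUEUE and friends), converting any timeout from the target
           * timespec layout before calling the host futex syscall.
           */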
12312 #ifdef CONFIG_INOTIFY
12313 #if defined(TARGET_NR_inotify_init)
12314     case TARGET_NR_inotify_init:
12315         ret = get_errno(inotify_init());
12316         if (ret >= 0) {
12317             fd_trans_register(ret, &target_inotify_trans);
12318         }
12319         return ret;
12320 #endif
12321 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12322     case TARGET_NR_inotify_init1:
12323         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12324                                           fcntl_flags_tbl)));
12325         if (ret >= 0) {
12326             fd_trans_register(ret, &target_inotify_trans);
12327         }
12328         return ret;
12329 #endif
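          /*
           * fd_trans_register() attaches a translator to the new inotify
           * descriptor so that struct inotify_event records read() from it
           * can be converted (byte-swapped) for the guest before being
           * copied into guest memory.
           */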
12330 #if defined(TARGET_NR_inotify_add_watch)
12331     case TARGET_NR_inotify_add_watch:
12332         p = lock_user_string(arg2);
              if (!p) {
                  return -TARGET_EFAULT;
              }
12333         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12334         unlock_user(p, arg2, 0);
12335         return ret;
12336 #endif
12337 #if defined(TARGET_NR_inotify_rm_watch)
12338     case TARGET_NR_inotify_rm_watch:
12339         return get_errno(inotify_rm_watch(arg1, arg2));
12340 #endif
12341 #endif
12342 
12343 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12344     case TARGET_NR_mq_open:
12345         {
12346             struct mq_attr posix_mq_attr;
12347             struct mq_attr *pposix_mq_attr;
12348             int host_flags;
12349 
12350             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12351             pposix_mq_attr = NULL;
12352             if (arg4) {
12353                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12354                     return -TARGET_EFAULT;
12355                 }
12356                 pposix_mq_attr = &posix_mq_attr;
12357             }
12358             p = lock_user_string(arg1 - 1);
12359             if (!p) {
12360                 return -TARGET_EFAULT;
12361             }
12362             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12363             unlock_user(p, arg1, 0);
12364         }
12365         return ret;
12366 
12367     case TARGET_NR_mq_unlink:
12368         p = lock_user_string(arg1 - 1);
12369         if (!p) {
12370             return -TARGET_EFAULT;
12371         }
12372         ret = get_errno(mq_unlink(p));
12373         unlock_user(p, arg1, 0);
12374         return ret;
12375 
12376 #ifdef TARGET_NR_mq_timedsend
12377     case TARGET_NR_mq_timedsend:
12378         {
12379             struct timespec ts;
12380 
12381             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12382             if (arg5 != 0) {
12383                 if (target_to_host_timespec(&ts, arg5)) {
12384                     return -TARGET_EFAULT;
12385                 }
12386                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12387                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12388                     return -TARGET_EFAULT;
12389                 }
12390             } else {
12391                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12392             }
12393             unlock_user(p, arg2, arg3);
12394         }
12395         return ret;
12396 #endif
12397 #ifdef TARGET_NR_mq_timedsend_time64
12398     case TARGET_NR_mq_timedsend_time64:
12399         {
12400             struct timespec ts;
12401 
12402             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12403             if (arg5 != 0) {
12404                 if (target_to_host_timespec64(&ts, arg5)) {
12405                     return -TARGET_EFAULT;
12406                 }
12407                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12408                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12409                     return -TARGET_EFAULT;
12410                 }
12411             } else {
12412                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12413             }
12414             unlock_user(p, arg2, arg3);
12415         }
12416         return ret;
12417 #endif
12418 
12419 #ifdef TARGET_NR_mq_timedreceive
12420     case TARGET_NR_mq_timedreceive:
12421         {
12422             struct timespec ts;
12423             unsigned int prio;
12424 
12425             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12426             if (arg5 != 0) {
12427                 if (target_to_host_timespec(&ts, arg5)) {
12428                     return -TARGET_EFAULT;
12429                 }
12430                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12431                                                      &prio, &ts));
12432                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12433                     return -TARGET_EFAULT;
12434                 }
12435             } else {
12436                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12437                                                      &prio, NULL));
12438             }
12439             unlock_user(p, arg2, arg3);
12440             if (arg4 != 0) {
12441                 put_user_u32(prio, arg4);
                  }
12442         }
12443         return ret;
12444 #endif
12445 #ifdef TARGET_NR_mq_timedreceive_time64
12446     case TARGET_NR_mq_timedreceive_time64:
12447         {
12448             struct timespec ts;
12449             unsigned int prio;
12450 
12451             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12452             if (arg5 != 0) {
12453                 if (target_to_host_timespec64(&ts, arg5)) {
12454                     return -TARGET_EFAULT;
12455                 }
12456                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12457                                                      &prio, &ts));
12458                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12459                     return -TARGET_EFAULT;
12460                 }
12461             } else {
12462                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12463                                                      &prio, NULL));
12464             }
12465             unlock_user(p, arg2, arg3);
12466             if (arg4 != 0) {
12467                 put_user_u32(prio, arg4);
12468             }
12469         }
12470         return ret;
12471 #endif
12472 
12473     /* Not implemented for now... */
12474 /*     case TARGET_NR_mq_notify: */
12475 /*         break; */
12476 
12477     case TARGET_NR_mq_getsetattr:
12478         {
12479             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12480             ret = 0;
12481             if (arg2 != 0) {
12482                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12483                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12484                                            &posix_mq_attr_out));
12485             } else if (arg3 != 0) {
12486                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12487             }
12488             if (ret == 0 && arg3 != 0) {
12489                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12490             }
12491         }
12492         return ret;
12493 #endif
12494 
12495 #ifdef CONFIG_SPLICE
12496 #ifdef TARGET_NR_tee
12497     case TARGET_NR_tee:
12498         {
12499             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12500         }
12501         return ret;
12502 #endif
12503 #ifdef TARGET_NR_splice
12504     case TARGET_NR_splice:
12505         {
12506             loff_t loff_in, loff_out;
12507             loff_t *ploff_in = NULL, *ploff_out = NULL;
12508             if (arg2) {
12509                 if (get_user_u64(loff_in, arg2)) {
12510                     return -TARGET_EFAULT;
12511                 }
12512                 ploff_in = &loff_in;
12513             }
12514             if (arg4) {
12515                 if (get_user_u64(loff_out, arg4)) {
12516                     return -TARGET_EFAULT;
12517                 }
12518                 ploff_out = &loff_out;
12519             }
12520             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12521             if (arg2) {
12522                 if (put_user_u64(loff_in, arg2)) {
12523                     return -TARGET_EFAULT;
12524                 }
12525             }
12526             if (arg4) {
12527                 if (put_user_u64(loff_out, arg4)) {
12528                     return -TARGET_EFAULT;
12529                 }
12530             }
12531         }
12532         return ret;
12533 #endif
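          /*
           * For splice, the optional 64-bit offsets are passed by pointer:
           * they are copied in with get_user_u64(), updated by the host
           * splice(), and written back so the guest observes the advanced
           * offsets.
           */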
12534 #ifdef TARGET_NR_vmsplice
12535     case TARGET_NR_vmsplice:
12536         {
12537             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12538             if (vec != NULL) {
12539                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12540                 unlock_iovec(vec, arg2, arg3, 0);
12541             } else {
12542                 ret = -host_to_target_errno(errno);
12543             }
12544         }
12545         return ret;
12546 #endif
12547 #endif /* CONFIG_SPLICE */
12548 #ifdef CONFIG_EVENTFD
12549 #if defined(TARGET_NR_eventfd)
12550     case TARGET_NR_eventfd:
12551         ret = get_errno(eventfd(arg1, 0));
12552         if (ret >= 0) {
12553             fd_trans_register(ret, &target_eventfd_trans);
12554         }
12555         return ret;
12556 #endif
12557 #if defined(TARGET_NR_eventfd2)
12558     case TARGET_NR_eventfd2:
12559     {
12560         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12561         if (arg2 & TARGET_O_NONBLOCK) {
12562             host_flags |= O_NONBLOCK;
12563         }
12564         if (arg2 & TARGET_O_CLOEXEC) {
12565             host_flags |= O_CLOEXEC;
12566         }
12567         ret = get_errno(eventfd(arg1, host_flags));
12568         if (ret >= 0) {
12569             fd_trans_register(ret, &target_eventfd_trans);
12570         }
12571         return ret;
12572     }
12573 #endif
12574 #endif /* CONFIG_EVENTFD  */
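          /*
           * TARGET_NR_eventfd2 translates its flags by hand rather than via
           * fcntl_flags_tbl: EFD_NONBLOCK and EFD_CLOEXEC share their values
           * with O_NONBLOCK and O_CLOEXEC, and the target's O_NONBLOCK bit is
           * not guaranteed to match the host's on every architecture.
           */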
12575 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12576     case TARGET_NR_fallocate:
12577 #if TARGET_ABI_BITS == 32
12578         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12579                                   target_offset64(arg5, arg6)));
12580 #else
12581         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12582 #endif
12583         return ret;
12584 #endif
12585 #if defined(CONFIG_SYNC_FILE_RANGE)
12586 #if defined(TARGET_NR_sync_file_range)
12587     case TARGET_NR_sync_file_range:
12588 #if TARGET_ABI_BITS == 32
12589 #if defined(TARGET_MIPS)
12590         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12591                                         target_offset64(arg5, arg6), arg7));
12592 #else
12593         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12594                                         target_offset64(arg4, arg5), arg6));
12595 #endif /* !TARGET_MIPS */
12596 #else
12597         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12598 #endif
12599         return ret;
12600 #endif
12601 #if defined(TARGET_NR_sync_file_range2) || \
12602     defined(TARGET_NR_arm_sync_file_range)
12603 #if defined(TARGET_NR_sync_file_range2)
12604     case TARGET_NR_sync_file_range2:
12605 #endif
12606 #if defined(TARGET_NR_arm_sync_file_range)
12607     case TARGET_NR_arm_sync_file_range:
12608 #endif
12609         /* This is like sync_file_range but the arguments are reordered */
12610 #if TARGET_ABI_BITS == 32
12611         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12612                                         target_offset64(arg5, arg6), arg2));
12613 #else
12614         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12615 #endif
12616         return ret;
12617 #endif
12618 #endif
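          /*
           * On 32-bit ABIs a 64-bit file offset is split across two argument
           * registers and reassembled with target_offset64().  MIPS passes
           * the pieces one slot later because its o32 ABI keeps 64-bit
           * arguments in aligned register pairs, which is why the register
           * numbering differs above.
           */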
12619 #if defined(TARGET_NR_signalfd4)
12620     case TARGET_NR_signalfd4:
12621         return do_signalfd4(arg1, arg2, arg4);
12622 #endif
12623 #if defined(TARGET_NR_signalfd)
12624     case TARGET_NR_signalfd:
12625         return do_signalfd4(arg1, arg2, 0);
12626 #endif
12627 #if defined(CONFIG_EPOLL)
12628 #if defined(TARGET_NR_epoll_create)
12629     case TARGET_NR_epoll_create:
12630         return get_errno(epoll_create(arg1));
12631 #endif
12632 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12633     case TARGET_NR_epoll_create1:
12634         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12635 #endif
12636 #if defined(TARGET_NR_epoll_ctl)
12637     case TARGET_NR_epoll_ctl:
12638     {
12639         struct epoll_event ep;
12640         struct epoll_event *epp = NULL;
12641         if (arg4) {
12642             if (arg2 != EPOLL_CTL_DEL) {
12643                 struct target_epoll_event *target_ep;
12644                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12645                     return -TARGET_EFAULT;
12646                 }
12647                 ep.events = tswap32(target_ep->events);
12648                 /*
12649                  * The epoll_data_t union is just opaque data to the kernel,
12650                  * so we transfer all 64 bits across and need not worry what
12651                  * actual data type it is.
12652                  */
12653                 ep.data.u64 = tswap64(target_ep->data.u64);
12654                 unlock_user_struct(target_ep, arg4, 0);
12655             }
12656             /*
12657              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12658              * non-null pointer, even though this argument is ignored.
12659              */
12661             epp = &ep;
12662         }
12663         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12664     }
12665 #endif
12666 
12667 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12668 #if defined(TARGET_NR_epoll_wait)
12669     case TARGET_NR_epoll_wait:
12670 #endif
12671 #if defined(TARGET_NR_epoll_pwait)
12672     case TARGET_NR_epoll_pwait:
12673 #endif
12674     {
12675         struct target_epoll_event *target_ep;
12676         struct epoll_event *ep;
12677         int epfd = arg1;
12678         int maxevents = arg3;
12679         int timeout = arg4;
12680 
12681         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12682             return -TARGET_EINVAL;
12683         }
12684 
12685         target_ep = lock_user(VERIFY_WRITE, arg2,
12686                               maxevents * sizeof(struct target_epoll_event), 1);
12687         if (!target_ep) {
12688             return -TARGET_EFAULT;
12689         }
12690 
12691         ep = g_try_new(struct epoll_event, maxevents);
12692         if (!ep) {
12693             unlock_user(target_ep, arg2, 0);
12694             return -TARGET_ENOMEM;
12695         }
12696 
12697         switch (num) {
12698 #if defined(TARGET_NR_epoll_pwait)
12699         case TARGET_NR_epoll_pwait:
12700         {
12701             sigset_t *set = NULL;
12702 
12703             if (arg5) {
12704                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12705                 if (ret != 0) {
12706                     break;
12707                 }
12708             }
12709 
12710             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12711                                              set, SIGSET_T_SIZE));
12712 
12713             if (set) {
12714                 finish_sigsuspend_mask(ret);
12715             }
12716             break;
12717         }
12718 #endif
12719 #if defined(TARGET_NR_epoll_wait)
12720         case TARGET_NR_epoll_wait:
12721             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12722                                              NULL, 0));
12723             break;
12724 #endif
12725         default:
12726             ret = -TARGET_ENOSYS;
12727         }
12728         if (!is_error(ret)) {
12729             int i;
12730             for (i = 0; i < ret; i++) {
12731                 target_ep[i].events = tswap32(ep[i].events);
12732                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12733             }
12734             unlock_user(target_ep, arg2,
12735                         ret * sizeof(struct target_epoll_event));
12736         } else {
12737             unlock_user(target_ep, arg2, 0);
12738         }
12739         g_free(ep);
12740         return ret;
12741     }
12742 #endif
12743 #endif
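          /*
           * epoll_wait and epoll_pwait both funnel into safe_epoll_pwait();
           * the returned host epoll_event array is byte-swapped field by
           * field into the guest buffer, and maxevents is bounded by
           * TARGET_EP_MAX_EVENTS to keep the temporary g_try_new() allocation
           * within reason.
           */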
12744 #ifdef TARGET_NR_prlimit64
12745     case TARGET_NR_prlimit64:
12746     {
12747         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
12748         struct target_rlimit64 *target_rnew, *target_rold;
12749         struct host_rlimit64 rnew, rold, *rnewp = NULL;
12750         int resource = target_to_host_resource(arg2);
12751 
12752         if (arg3 && (resource != RLIMIT_AS &&
12753                      resource != RLIMIT_DATA &&
12754                      resource != RLIMIT_STACK)) {
12755             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12756                 return -TARGET_EFAULT;
12757             }
12758             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12759             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12760             unlock_user_struct(target_rnew, arg3, 0);
12761             rnewp = &rnew;
12762         }
12763 
12764         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12765         if (!is_error(ret) && arg4) {
12766             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12767                 return -TARGET_EFAULT;
12768             }
12769             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12770             target_rold->rlim_max = tswap64(rold.rlim_max);
12771             unlock_user_struct(target_rold, arg4, 1);
12772         }
12773         return ret;
12774     }
12775 #endif
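          /*
           * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are
           * deliberately not forwarded to the host above: the guest's idea of
           * those limits does not account for QEMU's own memory use, so
           * applying them to the whole process could break the emulator.
           */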
12776 #ifdef TARGET_NR_gethostname
12777     case TARGET_NR_gethostname:
12778     {
12779         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12780         if (name) {
12781             ret = get_errno(gethostname(name, arg2));
12782             unlock_user(name, arg1, arg2);
12783         } else {
12784             ret = -TARGET_EFAULT;
12785         }
12786         return ret;
12787     }
12788 #endif
12789 #ifdef TARGET_NR_atomic_cmpxchg_32
12790     case TARGET_NR_atomic_cmpxchg_32:
12791     {
12792         /* should use start_exclusive from main.c */
12793         abi_ulong mem_value;
12794         if (get_user_u32(mem_value, arg6)) {
12795             target_siginfo_t info;
12796             info.si_signo = SIGSEGV;
12797             info.si_errno = 0;
12798             info.si_code = TARGET_SEGV_MAPERR;
12799             info._sifields._sigfault._addr = arg6;
12800             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
12801             return 0xdeadbeef;
12802         }
12803         if (mem_value == arg2) {
12804             put_user_u32(arg1, arg6);
12805         }
12806         return mem_value;
12807     }
12808 #endif
12809 #ifdef TARGET_NR_atomic_barrier
12810     case TARGET_NR_atomic_barrier:
12811         /* Like the kernel implementation and the qemu arm
12812            barrier, treat this as a no-op. */
12813         return 0;
12814 #endif
12815 
12816 #ifdef TARGET_NR_timer_create
12817     case TARGET_NR_timer_create:
12818     {
12819         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12820 
12821         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12822 
12823         int clkid = arg1;
12824         int timer_index = next_free_host_timer();
12825 
12826         if (timer_index < 0) {
12827             ret = -TARGET_EAGAIN;
12828         } else {
12829             timer_t *phtimer = g_posix_timers + timer_index;
12830 
12831             if (arg2) {
12832                 phost_sevp = &host_sevp;
12833                 ret = target_to_host_sigevent(phost_sevp, arg2);
12834                 if (ret != 0) {
12835                     return ret;
12836                 }
12837             }
12838 
12839             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12840             if (ret) {
12841                 phtimer = NULL;
12842             } else {
12843                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12844                     return -TARGET_EFAULT;
12845                 }
12846             }
12847         }
12848         return ret;
12849     }
12850 #endif
12851 
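          /*
           * The value handed back to the guest above is not the raw host
           * timer_t: it is an index into g_posix_timers or'ed with
           * TIMER_MAGIC, which get_timer_id() in the timer_* handlers below
           * validates and strips before indexing the table.
           */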
12852 #ifdef TARGET_NR_timer_settime
12853     case TARGET_NR_timer_settime:
12854     {
12855         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12856          * struct itimerspec * old_value */
12857         target_timer_t timerid = get_timer_id(arg1);
12858 
12859         if (timerid < 0) {
12860             ret = timerid;
12861         } else if (arg3 == 0) {
12862             ret = -TARGET_EINVAL;
12863         } else {
12864             timer_t htimer = g_posix_timers[timerid];
12865             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12866 
12867             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12868                 return -TARGET_EFAULT;
12869             }
12870             ret = get_errno(
12871                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12872             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12873                 return -TARGET_EFAULT;
12874             }
12875         }
12876         return ret;
12877     }
12878 #endif
12879 
12880 #ifdef TARGET_NR_timer_settime64
12881     case TARGET_NR_timer_settime64:
12882     {
12883         target_timer_t timerid = get_timer_id(arg1);
12884 
12885         if (timerid < 0) {
12886             ret = timerid;
12887         } else if (arg3 == 0) {
12888             ret = -TARGET_EINVAL;
12889         } else {
12890             timer_t htimer = g_posix_timers[timerid];
12891             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12892 
12893             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12894                 return -TARGET_EFAULT;
12895             }
12896             ret = get_errno(
12897                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12898             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12899                 return -TARGET_EFAULT;
12900             }
12901         }
12902         return ret;
12903     }
12904 #endif
12905 
12906 #ifdef TARGET_NR_timer_gettime
12907     case TARGET_NR_timer_gettime:
12908     {
12909         /* args: timer_t timerid, struct itimerspec *curr_value */
12910         target_timer_t timerid = get_timer_id(arg1);
12911 
12912         if (timerid < 0) {
12913             ret = timerid;
12914         } else if (!arg2) {
12915             ret = -TARGET_EFAULT;
12916         } else {
12917             timer_t htimer = g_posix_timers[timerid];
12918             struct itimerspec hspec;
12919             ret = get_errno(timer_gettime(htimer, &hspec));
12920 
12921             if (host_to_target_itimerspec(arg2, &hspec)) {
12922                 ret = -TARGET_EFAULT;
12923             }
12924         }
12925         return ret;
12926     }
12927 #endif
12928 
12929 #ifdef TARGET_NR_timer_gettime64
12930     case TARGET_NR_timer_gettime64:
12931     {
12932         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12933         target_timer_t timerid = get_timer_id(arg1);
12934 
12935         if (timerid < 0) {
12936             ret = timerid;
12937         } else if (!arg2) {
12938             ret = -TARGET_EFAULT;
12939         } else {
12940             timer_t htimer = g_posix_timers[timerid];
12941             struct itimerspec hspec;
12942             ret = get_errno(timer_gettime(htimer, &hspec));
12943 
12944             if (host_to_target_itimerspec64(arg2, &hspec)) {
12945                 ret = -TARGET_EFAULT;
12946             }
12947         }
12948         return ret;
12949     }
12950 #endif
12951 
12952 #ifdef TARGET_NR_timer_getoverrun
12953     case TARGET_NR_timer_getoverrun:
12954     {
12955         /* args: timer_t timerid */
12956         target_timer_t timerid = get_timer_id(arg1);
12957 
12958         if (timerid < 0) {
12959             ret = timerid;
12960         } else {
12961             timer_t htimer = g_posix_timers[timerid];
12962             ret = get_errno(timer_getoverrun(htimer));
12963         }
12964         return ret;
12965     }
12966 #endif
12967 
12968 #ifdef TARGET_NR_timer_delete
12969     case TARGET_NR_timer_delete:
12970     {
12971         /* args: timer_t timerid */
12972         target_timer_t timerid = get_timer_id(arg1);
12973 
12974         if (timerid < 0) {
12975             ret = timerid;
12976         } else {
12977             timer_t htimer = g_posix_timers[timerid];
12978             ret = get_errno(timer_delete(htimer));
12979             g_posix_timers[timerid] = 0;
12980         }
12981         return ret;
12982     }
12983 #endif
12984 
12985 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12986     case TARGET_NR_timerfd_create:
12987         return get_errno(timerfd_create(arg1,
12988                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12989 #endif
12990 
12991 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12992     case TARGET_NR_timerfd_gettime:
12993         {
12994             struct itimerspec its_curr;
12995 
12996             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12997 
12998             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12999                 return -TARGET_EFAULT;
13000             }
13001         }
13002         return ret;
13003 #endif
13004 
13005 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13006     case TARGET_NR_timerfd_gettime64:
13007         {
13008             struct itimerspec its_curr;
13009 
13010             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13011 
13012             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13013                 return -TARGET_EFAULT;
13014             }
13015         }
13016         return ret;
13017 #endif
13018 
13019 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13020     case TARGET_NR_timerfd_settime:
13021         {
13022             struct itimerspec its_new, its_old, *p_new;
13023 
13024             if (arg3) {
13025                 if (target_to_host_itimerspec(&its_new, arg3)) {
13026                     return -TARGET_EFAULT;
13027                 }
13028                 p_new = &its_new;
13029             } else {
13030                 p_new = NULL;
13031             }
13032 
13033             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13034 
13035             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13036                 return -TARGET_EFAULT;
13037             }
13038         }
13039         return ret;
13040 #endif
13041 
13042 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13043     case TARGET_NR_timerfd_settime64:
13044         {
13045             struct itimerspec its_new, its_old, *p_new;
13046 
13047             if (arg3) {
13048                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13049                     return -TARGET_EFAULT;
13050                 }
13051                 p_new = &its_new;
13052             } else {
13053                 p_new = NULL;
13054             }
13055 
13056             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13057 
13058             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13059                 return -TARGET_EFAULT;
13060             }
13061         }
13062         return ret;
13063 #endif
13064 
13065 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13066     case TARGET_NR_ioprio_get:
13067         return get_errno(ioprio_get(arg1, arg2));
13068 #endif
13069 
13070 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13071     case TARGET_NR_ioprio_set:
13072         return get_errno(ioprio_set(arg1, arg2, arg3));
13073 #endif
13074 
13075 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13076     case TARGET_NR_setns:
13077         return get_errno(setns(arg1, arg2));
13078 #endif
13079 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13080     case TARGET_NR_unshare:
13081         return get_errno(unshare(arg1));
13082 #endif
13083 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13084     case TARGET_NR_kcmp:
13085         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13086 #endif
13087 #ifdef TARGET_NR_swapcontext
13088     case TARGET_NR_swapcontext:
13089         /* PowerPC specific.  */
13090         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13091 #endif
13092 #ifdef TARGET_NR_memfd_create
13093     case TARGET_NR_memfd_create:
13094         p = lock_user_string(arg1);
13095         if (!p) {
13096             return -TARGET_EFAULT;
13097         }
13098         ret = get_errno(memfd_create(p, arg2));
13099         fd_trans_unregister(ret);
13100         unlock_user(p, arg1, 0);
13101         return ret;
13102 #endif
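          /*
           * fd_trans_unregister() drops any stale translator still registered
           * for the descriptor number just returned; memfd descriptors need
           * no data translation of their own.
           */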
13103 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13104     case TARGET_NR_membarrier:
13105         return get_errno(membarrier(arg1, arg2));
13106 #endif
13107 
13108 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13109     case TARGET_NR_copy_file_range:
13110         {
13111             loff_t inoff, outoff;
13112             loff_t *pinoff = NULL, *poutoff = NULL;
13113 
13114             if (arg2) {
13115                 if (get_user_u64(inoff, arg2)) {
13116                     return -TARGET_EFAULT;
13117                 }
13118                 pinoff = &inoff;
13119             }
13120             if (arg4) {
13121                 if (get_user_u64(outoff, arg4)) {
13122                     return -TARGET_EFAULT;
13123                 }
13124                 poutoff = &outoff;
13125             }
13126             /* Do not sign-extend the count parameter. */
13127             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13128                                                  (abi_ulong)arg5, arg6));
13129             if (!is_error(ret) && ret > 0) {
13130                 if (arg2) {
13131                     if (put_user_u64(inoff, arg2)) {
13132                         return -TARGET_EFAULT;
13133                     }
13134                 }
13135                 if (arg4) {
13136                     if (put_user_u64(outoff, arg4)) {
13137                         return -TARGET_EFAULT;
13138                     }
13139                 }
13140             }
13141         }
13142         return ret;
13143 #endif
13144 
13145 #if defined(TARGET_NR_pivot_root)
13146     case TARGET_NR_pivot_root:
13147         {
13148             void *p2;
13149             p = lock_user_string(arg1); /* new_root */
13150             p2 = lock_user_string(arg2); /* put_old */
13151             if (!p || !p2) {
13152                 ret = -TARGET_EFAULT;
13153             } else {
13154                 ret = get_errno(pivot_root(p, p2));
13155             }
13156             unlock_user(p2, arg2, 0);
13157             unlock_user(p, arg1, 0);
13158         }
13159         return ret;
13160 #endif
13161 
13162     default:
13163         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13164         return -TARGET_ENOSYS;
13165     }
13166     return ret;
13167 }
13168 
13169 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13170                     abi_long arg2, abi_long arg3, abi_long arg4,
13171                     abi_long arg5, abi_long arg6, abi_long arg7,
13172                     abi_long arg8)
13173 {
13174     CPUState *cpu = env_cpu(cpu_env);
13175     abi_long ret;
13176 
13177 #ifdef DEBUG_ERESTARTSYS
13178     /* Debug-only code for exercising the syscall-restart code paths
13179      * in the per-architecture cpu main loops: restart every syscall
13180      * the guest makes once before letting it through.
13181      */
13182     {
13183         static bool flag;
13184         flag = !flag;
13185         if (flag) {
13186             return -QEMU_ERESTARTSYS;
13187         }
13188     }
13189 #endif
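          /*
           * The rest of this wrapper is instrumentation around do_syscall1():
           * record_syscall_start()/record_syscall_return() feed the tracing
           * and plugin hooks, and the LOG_STRACE prints implement -strace
           * output.
           */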
13190 
13191     record_syscall_start(cpu, num, arg1,
13192                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13193 
13194     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13195         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13196     }
13197 
13198     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13199                       arg5, arg6, arg7, arg8);
13200 
13201     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13202         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13203                           arg3, arg4, arg5, arg6);
13204     }
13205 
13206     record_syscall_return(cpu, num, ret);
13207     return ret;
13208 }
13209