xref: /openbmc/qemu/linux-user/syscall.c (revision cd0e31a4)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include <elf.h>
26 #include <endian.h>
27 #include <grp.h>
28 #include <sys/ipc.h>
29 #include <sys/msg.h>
30 #include <sys/wait.h>
31 #include <sys/mount.h>
32 #include <sys/file.h>
33 #include <sys/fsuid.h>
34 #include <sys/personality.h>
35 #include <sys/prctl.h>
36 #include <sys/resource.h>
37 #include <sys/swap.h>
38 #include <linux/capability.h>
39 #include <sched.h>
40 #include <sys/timex.h>
41 #include <sys/socket.h>
42 #include <linux/sockios.h>
43 #include <sys/un.h>
44 #include <sys/uio.h>
45 #include <poll.h>
46 #include <sys/times.h>
47 #include <sys/shm.h>
48 #include <sys/sem.h>
49 #include <sys/statfs.h>
50 #include <utime.h>
51 #include <sys/sysinfo.h>
52 #include <sys/signalfd.h>
53 //#include <sys/user.h>
54 #include <netinet/in.h>
55 #include <netinet/ip.h>
56 #include <netinet/tcp.h>
57 #include <netinet/udp.h>
58 #include <linux/wireless.h>
59 #include <linux/icmp.h>
60 #include <linux/icmpv6.h>
61 #include <linux/if_tun.h>
62 #include <linux/in6.h>
63 #include <linux/errqueue.h>
64 #include <linux/random.h>
65 #ifdef CONFIG_TIMERFD
66 #include <sys/timerfd.h>
67 #endif
68 #ifdef CONFIG_EVENTFD
69 #include <sys/eventfd.h>
70 #endif
71 #ifdef CONFIG_EPOLL
72 #include <sys/epoll.h>
73 #endif
74 #ifdef CONFIG_ATTR
75 #include "qemu/xattr.h"
76 #endif
77 #ifdef CONFIG_SENDFILE
78 #include <sys/sendfile.h>
79 #endif
80 #ifdef HAVE_SYS_KCOV_H
81 #include <sys/kcov.h>
82 #endif
83 
84 #define termios host_termios
85 #define winsize host_winsize
86 #define termio host_termio
87 #define sgttyb host_sgttyb /* same as target */
88 #define tchars host_tchars /* same as target */
89 #define ltchars host_ltchars /* same as target */
90 
91 #include <linux/termios.h>
92 #include <linux/unistd.h>
93 #include <linux/cdrom.h>
94 #include <linux/hdreg.h>
95 #include <linux/soundcard.h>
96 #include <linux/kd.h>
97 #include <linux/mtio.h>
98 #include <linux/fs.h>
99 #include <linux/fd.h>
100 #if defined(CONFIG_FIEMAP)
101 #include <linux/fiemap.h>
102 #endif
103 #include <linux/fb.h>
104 #if defined(CONFIG_USBFS)
105 #include <linux/usbdevice_fs.h>
106 #include <linux/usb/ch9.h>
107 #endif
108 #include <linux/vt.h>
109 #include <linux/dm-ioctl.h>
110 #include <linux/reboot.h>
111 #include <linux/route.h>
112 #include <linux/filter.h>
113 #include <linux/blkpg.h>
114 #include <netpacket/packet.h>
115 #include <linux/netlink.h>
116 #include <linux/if_alg.h>
117 #include <linux/rtc.h>
118 #include <sound/asound.h>
119 #ifdef HAVE_BTRFS_H
120 #include <linux/btrfs.h>
121 #endif
122 #ifdef HAVE_DRM_H
123 #include <libdrm/drm.h>
124 #include <libdrm/i915_drm.h>
125 #endif
126 #include "linux_loop.h"
127 #include "uname.h"
128 
129 #include "qemu.h"
130 #include "user-internals.h"
131 #include "strace.h"
132 #include "signal-common.h"
133 #include "loader.h"
134 #include "user-mmap.h"
135 #include "user/safe-syscall.h"
136 #include "qemu/guest-random.h"
137 #include "qemu/selfmap.h"
138 #include "user/syscall-trace.h"
139 #include "special-errno.h"
140 #include "qapi/error.h"
141 #include "fd-trans.h"
142 #include "tcg/tcg.h"
143 
144 #ifndef CLONE_IO
145 #define CLONE_IO                0x80000000      /* Clone io context */
146 #endif
147 
148 /* We can't directly call the host clone syscall, because this will
149  * badly confuse libc (breaking mutexes, for example). So we must
150  * divide clone flags into:
151  *  * flag combinations that look like pthread_create()
152  *  * flag combinations that look like fork()
153  *  * flags we can implement within QEMU itself
154  *  * flags we can't support and will return an error for
155  */
156 /* For thread creation, all these flags must be present; for
157  * fork, none must be present.
158  */
159 #define CLONE_THREAD_FLAGS                              \
160     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
161      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
162 
163 /* These flags are ignored:
164  * CLONE_DETACHED is now ignored by the kernel;
165  * CLONE_IO is just an optimisation hint to the I/O scheduler
166  */
167 #define CLONE_IGNORED_FLAGS                     \
168     (CLONE_DETACHED | CLONE_IO)
169 
170 /* Flags for fork which we can implement within QEMU itself */
171 #define CLONE_OPTIONAL_FORK_FLAGS               \
172     (CLONE_SETTLS | CLONE_PARENT_SETTID |       \
173      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
174 
175 /* Flags for thread creation which we can implement within QEMU itself */
176 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
177     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
178      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
179 
180 #define CLONE_INVALID_FORK_FLAGS                                        \
181     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
182 
183 #define CLONE_INVALID_THREAD_FLAGS                                      \
184     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
185        CLONE_IGNORED_FLAGS))
186 
187 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
188  * have almost all been allocated. We cannot support any of
189  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
190  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
191  * The checks against the invalid thread masks above will catch these.
192  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
193  */
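/*
 * For example, a glibc pthread_create() typically issues clone() with
 * CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 * CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 * CLONE_CHILD_CLEARTID, i.e. exactly CLONE_THREAD_FLAGS plus a subset of
 * CLONE_OPTIONAL_THREAD_FLAGS, so it falls into the "looks like
 * pthread_create()" bucket described above.
 */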
194 
195 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
196  * once. This exercises the codepaths for restart.
197  */
198 //#define DEBUG_ERESTARTSYS
199 
200 //#include <linux/msdos_fs.h>
201 #define VFAT_IOCTL_READDIR_BOTH \
202     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
203 #define VFAT_IOCTL_READDIR_SHORT \
204     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
205 
206 #undef _syscall0
207 #undef _syscall1
208 #undef _syscall2
209 #undef _syscall3
210 #undef _syscall4
211 #undef _syscall5
212 #undef _syscall6
213 
214 #define _syscall0(type,name)		\
215 static type name (void)			\
216 {					\
217 	return syscall(__NR_##name);	\
218 }
219 
220 #define _syscall1(type,name,type1,arg1)		\
221 static type name (type1 arg1)			\
222 {						\
223 	return syscall(__NR_##name, arg1);	\
224 }
225 
226 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
227 static type name (type1 arg1,type2 arg2)		\
228 {							\
229 	return syscall(__NR_##name, arg1, arg2);	\
230 }
231 
232 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
233 static type name (type1 arg1,type2 arg2,type3 arg3)		\
234 {								\
235 	return syscall(__NR_##name, arg1, arg2, arg3);		\
236 }
237 
238 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
239 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
240 {										\
241 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
242 }
243 
244 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
245 		  type5,arg5)							\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
249 }
250 
251 
252 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5,type6,arg6)					\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
255                   type6 arg6)							\
256 {										\
257 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
258 }
259 
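/*
 * For illustration, an invocation such as _syscall1(int, exit_group, int,
 * error_code) (used further down) expands roughly to:
 *
 *     static int exit_group(int error_code)
 *     {
 *         return syscall(__NR_exit_group, error_code);
 *     }
 *
 * i.e. a thin wrapper that issues the raw host syscall, bypassing any
 * glibc wrapper of the same name.
 */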
260 
261 #define __NR_sys_uname __NR_uname
262 #define __NR_sys_getcwd1 __NR_getcwd
263 #define __NR_sys_getdents __NR_getdents
264 #define __NR_sys_getdents64 __NR_getdents64
265 #define __NR_sys_getpriority __NR_getpriority
266 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
267 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
268 #define __NR_sys_syslog __NR_syslog
269 #if defined(__NR_futex)
270 # define __NR_sys_futex __NR_futex
271 #endif
272 #if defined(__NR_futex_time64)
273 # define __NR_sys_futex_time64 __NR_futex_time64
274 #endif
275 #define __NR_sys_statx __NR_statx
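/*
 * The __NR_sys_* aliases above exist so that the _syscallN macros can
 * generate wrappers named sys_uname(), sys_getdents(), etc. that issue
 * the raw host syscall while staying distinct from the libc functions of
 * the same base name.
 */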
276 
277 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
278 #define __NR__llseek __NR_lseek
279 #endif
280 
281 /* Newer kernel ports have llseek() instead of _llseek() */
282 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
283 #define TARGET_NR__llseek TARGET_NR_llseek
284 #endif
285 
286 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
287 #ifndef TARGET_O_NONBLOCK_MASK
288 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
289 #endif
290 
291 #define __NR_sys_gettid __NR_gettid
292 _syscall0(int, sys_gettid)
293 
294 /* For the 64-bit guest on 32-bit host case we must emulate
295  * getdents using getdents64, because otherwise the host
296  * might hand us back more dirent records than we can fit
297  * into the guest buffer after structure format conversion.
298  * Otherwise we emulate the guest getdents with the host getdents where available.
299  */
300 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
301 #define EMULATE_GETDENTS_WITH_GETDENTS
302 #endif
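/*
 * Concrete example of the problem described above: with a 64-bit guest on
 * a 32-bit host, the host's struct linux_dirent is smaller than the
 * guest's, so a host getdents() sized for the guest buffer may return more
 * records than fit once each record is enlarged during conversion; the
 * fixed-width getdents64 layout avoids that.
 */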
303 
304 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
305 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
306 #endif
307 #if (defined(TARGET_NR_getdents) && \
308       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
309     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
310 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
311 #endif
312 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
313 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
314           loff_t *, res, uint, wh);
315 #endif
316 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
317 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
318           siginfo_t *, uinfo)
319 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
320 #ifdef __NR_exit_group
321 _syscall1(int,exit_group,int,error_code)
322 #endif
323 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
324 _syscall1(int,set_tid_address,int *,tidptr)
325 #endif
326 #if defined(__NR_futex)
327 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
328           const struct timespec *,timeout,int *,uaddr2,int,val3)
329 #endif
330 #if defined(__NR_futex_time64)
331 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
332           const struct timespec *,timeout,int *,uaddr2,int,val3)
333 #endif
334 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
335 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
336           unsigned long *, user_mask_ptr);
337 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
338 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
339           unsigned long *, user_mask_ptr);
340 /* sched_attr is not defined in glibc */
341 struct sched_attr {
342     uint32_t size;
343     uint32_t sched_policy;
344     uint64_t sched_flags;
345     int32_t sched_nice;
346     uint32_t sched_priority;
347     uint64_t sched_runtime;
348     uint64_t sched_deadline;
349     uint64_t sched_period;
350     uint32_t sched_util_min;
351     uint32_t sched_util_max;
352 };
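/*
 * This definition mirrors the kernel's UAPI struct sched_attr so that it
 * can be handed directly to the sched_getattr/sched_setattr wrappers
 * declared below; the leading 'size' field tells the kernel how much of
 * the structure the caller provides.
 */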
353 #define __NR_sys_sched_getattr __NR_sched_getattr
354 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
355           unsigned int, size, unsigned int, flags);
356 #define __NR_sys_sched_setattr __NR_sched_setattr
357 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
358           unsigned int, flags);
359 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
360 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
361 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
362 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
363           const struct sched_param *, param);
364 #define __NR_sys_sched_getparam __NR_sched_getparam
365 _syscall2(int, sys_sched_getparam, pid_t, pid,
366           struct sched_param *, param);
367 #define __NR_sys_sched_setparam __NR_sched_setparam
368 _syscall2(int, sys_sched_setparam, pid_t, pid,
369           const struct sched_param *, param);
370 #define __NR_sys_getcpu __NR_getcpu
371 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
372 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
373           void *, arg);
374 _syscall2(int, capget, struct __user_cap_header_struct *, header,
375           struct __user_cap_data_struct *, data);
376 _syscall2(int, capset, struct __user_cap_header_struct *, header,
377           struct __user_cap_data_struct *, data);
378 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
379 _syscall2(int, ioprio_get, int, which, int, who)
380 #endif
381 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
382 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
383 #endif
384 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
385 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
386 #endif
387 
388 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
389 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
390           unsigned long, idx1, unsigned long, idx2)
391 #endif
392 
393 /*
394  * It is assumed that struct statx is architecture independent.
395  */
396 #if defined(TARGET_NR_statx) && defined(__NR_statx)
397 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
398           unsigned int, mask, struct target_statx *, statxbuf)
399 #endif
400 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
401 _syscall2(int, membarrier, int, cmd, int, flags)
402 #endif
403 
404 static const bitmask_transtbl fcntl_flags_tbl[] = {
405   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
406   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
407   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
408   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
409   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
410   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
411   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
412   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
413   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
414   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
415   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
416   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
417   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
418 #if defined(O_DIRECT)
419   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
420 #endif
421 #if defined(O_NOATIME)
422   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
423 #endif
424 #if defined(O_CLOEXEC)
425   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
426 #endif
427 #if defined(O_PATH)
428   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
429 #endif
430 #if defined(O_TMPFILE)
431   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
432 #endif
433   /* Don't terminate the list prematurely on 64-bit host+guest.  */
434 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
435   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
436 #endif
437   { 0, 0, 0, 0 }
438 };
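/*
 * Each bitmask_transtbl entry is { target_mask, target_bits, host_mask,
 * host_bits }: when the masked target flags equal target_bits, the
 * corresponding host_bits are set in the translated value (and vice versa
 * for the host-to-target direction).  This lets e.g. TARGET_O_APPEND map
 * to the host O_APPEND even when the two constants differ numerically.
 */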
439 
440 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
441 
442 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
443 #if defined(__NR_utimensat)
444 #define __NR_sys_utimensat __NR_utimensat
445 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
446           const struct timespec *,tsp,int,flags)
447 #else
448 static int sys_utimensat(int dirfd, const char *pathname,
449                          const struct timespec times[2], int flags)
450 {
451     errno = ENOSYS;
452     return -1;
453 }
454 #endif
455 #endif /* TARGET_NR_utimensat */
456 
457 #ifdef TARGET_NR_renameat2
458 #if defined(__NR_renameat2)
459 #define __NR_sys_renameat2 __NR_renameat2
460 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
461           const char *, new, unsigned int, flags)
462 #else
463 static int sys_renameat2(int oldfd, const char *old,
464                          int newfd, const char *new, int flags)
465 {
466     if (flags == 0) {
467         return renameat(oldfd, old, newfd, new);
468     }
469     errno = ENOSYS;
470     return -1;
471 }
472 #endif
473 #endif /* TARGET_NR_renameat2 */
474 
475 #ifdef CONFIG_INOTIFY
476 #include <sys/inotify.h>
477 #else
478 /* Userspace can usually survive runtime without inotify */
479 #undef TARGET_NR_inotify_init
480 #undef TARGET_NR_inotify_init1
481 #undef TARGET_NR_inotify_add_watch
482 #undef TARGET_NR_inotify_rm_watch
483 #endif /* CONFIG_INOTIFY  */
484 
485 #if defined(TARGET_NR_prlimit64)
486 #ifndef __NR_prlimit64
487 # define __NR_prlimit64 -1
488 #endif
489 #define __NR_sys_prlimit64 __NR_prlimit64
490 /* The glibc rlimit structure may not match the one used by the underlying syscall */
491 struct host_rlimit64 {
492     uint64_t rlim_cur;
493     uint64_t rlim_max;
494 };
495 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
496           const struct host_rlimit64 *, new_limit,
497           struct host_rlimit64 *, old_limit)
498 #endif
499 
500 
501 #if defined(TARGET_NR_timer_create)
502 /* Maximum of 32 active POSIX timers allowed at any one time. */
503 static timer_t g_posix_timers[32] = { 0, };
504 
505 static inline int next_free_host_timer(void)
506 {
507     int k;
508     /* FIXME: Does finding the next free slot require a lock? */
509     for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
510         if (g_posix_timers[k] == 0) {
511             g_posix_timers[k] = (timer_t) 1;
512             return k;
513         }
514     }
515     return -1;
516 }
517 #endif
518 
519 static inline int host_to_target_errno(int host_errno)
520 {
521     switch (host_errno) {
522 #define E(X)  case X: return TARGET_##X;
523 #include "errnos.c.inc"
524 #undef E
525     default:
526         return host_errno;
527     }
528 }
529 
530 static inline int target_to_host_errno(int target_errno)
531 {
532     switch (target_errno) {
533 #define E(X)  case TARGET_##X: return X;
534 #include "errnos.c.inc"
535 #undef E
536     default:
537         return target_errno;
538     }
539 }
540 
541 static inline abi_long get_errno(abi_long ret)
542 {
543     if (ret == -1) {
544         return -host_to_target_errno(errno);
545     }
546     return ret;
547 }
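/*
 * Typical usage, as at the call sites later in this file:
 *
 *     ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
 *
 * On failure the host -1/errno pair is folded into a negative target
 * errno such as -TARGET_EINTR; on success the host return value is
 * passed through unchanged.
 */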
548 
549 const char *target_strerror(int err)
550 {
551     if (err == QEMU_ERESTARTSYS) {
552         return "To be restarted";
553     }
554     if (err == QEMU_ESIGRETURN) {
555         return "Successful exit from sigreturn";
556     }
557 
558     return strerror(target_to_host_errno(err));
559 }
560 
561 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
562 {
563     int i;
564     uint8_t b;
565     if (usize <= ksize) {
566         return 1;
567     }
568     for (i = ksize; i < usize; i++) {
569         if (get_user_u8(b, addr + i)) {
570             return -TARGET_EFAULT;
571         }
572         if (b != 0) {
573             return 0;
574         }
575     }
576     return 1;
577 }
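/*
 * check_zeroed_user() supports "extensible struct" syscalls such as
 * sched_setattr: when the guest passes a structure larger than the one we
 * know about (usize > ksize), it returns 1 only if all of the extra tail
 * bytes are zero, 0 if any of them are set, and -TARGET_EFAULT if the
 * tail cannot be read.
 */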
578 
579 #define safe_syscall0(type, name) \
580 static type safe_##name(void) \
581 { \
582     return safe_syscall(__NR_##name); \
583 }
584 
585 #define safe_syscall1(type, name, type1, arg1) \
586 static type safe_##name(type1 arg1) \
587 { \
588     return safe_syscall(__NR_##name, arg1); \
589 }
590 
591 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
592 static type safe_##name(type1 arg1, type2 arg2) \
593 { \
594     return safe_syscall(__NR_##name, arg1, arg2); \
595 }
596 
597 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
598 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
599 { \
600     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
601 }
602 
603 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
604     type4, arg4) \
605 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
606 { \
607     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
608 }
609 
610 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
611     type4, arg4, type5, arg5) \
612 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
613     type5 arg5) \
614 { \
615     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
616 }
617 
618 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
619     type4, arg4, type5, arg5, type6, arg6) \
620 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
621     type5 arg5, type6 arg6) \
622 { \
623     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
624 }
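/*
 * The safe_* wrappers generated below behave like the plain host syscalls
 * but cooperate with guest signal delivery so that a syscall interrupted
 * by a guest signal can be restarted correctly (see user/safe-syscall.h
 * for the exact contract).  For example,
 *
 *     safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
 *
 * defines safe_read(fd, buff, count), which call sites then wrap with
 * get_errno() in the usual way.
 */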
625 
626 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
627 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
628 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
629               int, flags, mode_t, mode)
630 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
631 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
632               struct rusage *, rusage)
633 #endif
634 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
635               int, options, struct rusage *, rusage)
636 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
637 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
638     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
639 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
640               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
641 #endif
642 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
643 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
644               struct timespec *, tsp, const sigset_t *, sigmask,
645               size_t, sigsetsize)
646 #endif
647 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
648               int, maxevents, int, timeout, const sigset_t *, sigmask,
649               size_t, sigsetsize)
650 #if defined(__NR_futex)
651 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
652               const struct timespec *,timeout,int *,uaddr2,int,val3)
653 #endif
654 #if defined(__NR_futex_time64)
655 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
656               const struct timespec *,timeout,int *,uaddr2,int,val3)
657 #endif
658 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
659 safe_syscall2(int, kill, pid_t, pid, int, sig)
660 safe_syscall2(int, tkill, int, tid, int, sig)
661 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
662 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
663 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
664 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
665               unsigned long, pos_l, unsigned long, pos_h)
666 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
667               unsigned long, pos_l, unsigned long, pos_h)
668 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
669               socklen_t, addrlen)
670 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
671               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
672 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
673               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
674 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
675 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
676 safe_syscall2(int, flock, int, fd, int, operation)
677 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
678 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
679               const struct timespec *, uts, size_t, sigsetsize)
680 #endif
681 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
682               int, flags)
683 #if defined(TARGET_NR_nanosleep)
684 safe_syscall2(int, nanosleep, const struct timespec *, req,
685               struct timespec *, rem)
686 #endif
687 #if defined(TARGET_NR_clock_nanosleep) || \
688     defined(TARGET_NR_clock_nanosleep_time64)
689 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
690               const struct timespec *, req, struct timespec *, rem)
691 #endif
692 #ifdef __NR_ipc
693 #ifdef __s390x__
694 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
695               void *, ptr)
696 #else
697 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
698               void *, ptr, long, fifth)
699 #endif
700 #endif
701 #ifdef __NR_msgsnd
702 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
703               int, flags)
704 #endif
705 #ifdef __NR_msgrcv
706 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
707               long, msgtype, int, flags)
708 #endif
709 #ifdef __NR_semtimedop
710 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
711               unsigned, nsops, const struct timespec *, timeout)
712 #endif
713 #if defined(TARGET_NR_mq_timedsend) || \
714     defined(TARGET_NR_mq_timedsend_time64)
715 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
716               size_t, len, unsigned, prio, const struct timespec *, timeout)
717 #endif
718 #if defined(TARGET_NR_mq_timedreceive) || \
719     defined(TARGET_NR_mq_timedreceive_time64)
720 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
721               size_t, len, unsigned *, prio, const struct timespec *, timeout)
722 #endif
723 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
724 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
725               int, outfd, loff_t *, poutoff, size_t, length,
726               unsigned int, flags)
727 #endif
728 
729 /* We do ioctl like this rather than via safe_syscall3 to preserve the
730  * "third argument might be integer or pointer or not present" behaviour of
731  * the libc function.
732  */
733 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
734 /* Similarly for fcntl. Note that callers must always:
735  *  * pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
736  *  * use the flock64 struct rather than unsuffixed flock
737  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
738  */
739 #ifdef __NR_fcntl64
740 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
741 #else
742 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
743 #endif
744 
745 static inline int host_to_target_sock_type(int host_type)
746 {
747     int target_type;
748 
749     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
750     case SOCK_DGRAM:
751         target_type = TARGET_SOCK_DGRAM;
752         break;
753     case SOCK_STREAM:
754         target_type = TARGET_SOCK_STREAM;
755         break;
756     default:
757         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
758         break;
759     }
760 
761 #if defined(SOCK_CLOEXEC)
762     if (host_type & SOCK_CLOEXEC) {
763         target_type |= TARGET_SOCK_CLOEXEC;
764     }
765 #endif
766 
767 #if defined(SOCK_NONBLOCK)
768     if (host_type & SOCK_NONBLOCK) {
769         target_type |= TARGET_SOCK_NONBLOCK;
770     }
771 #endif
772 
773     return target_type;
774 }
775 
776 static abi_ulong target_brk;
777 static abi_ulong target_original_brk;
778 static abi_ulong brk_page;
779 
780 void target_set_brk(abi_ulong new_brk)
781 {
782     target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
783     brk_page = HOST_PAGE_ALIGN(target_brk);
784 }
785 
786 //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
787 #define DEBUGF_BRK(message, args...)
788 
789 /* do_brk() must return target values and target errnos. */
790 abi_long do_brk(abi_ulong new_brk)
791 {
792     abi_long mapped_addr;
793     abi_ulong new_alloc_size;
794 
795     /* brk pointers are always untagged */
796 
797     DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
798 
799     if (!new_brk) {
800         DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
801         return target_brk;
802     }
803     if (new_brk < target_original_brk) {
804         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
805                    target_brk);
806         return target_brk;
807     }
808 
809     /* If the new brk is less than the highest page reserved to the
810      * target heap allocation, set it and we're almost done...  */
811     if (new_brk <= brk_page) {
812         /* Heap contents are initialized to zero, as for anonymous
813          * mapped pages.  */
814         if (new_brk > target_brk) {
815             memset(g2h_untagged(target_brk), 0, new_brk - target_brk);
816         }
817         target_brk = new_brk;
818         DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
819         return target_brk;
820     }
821 
822     /* We need to allocate more memory after the brk... Note that
823      * we don't use MAP_FIXED because that will map over the top of
824      * any existing mapping (like the one with the host libc or qemu
825      * itself); instead we treat "mapped but at wrong address" as
826      * a failure and unmap again.
827      */
828     new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
829     mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
830                                         PROT_READ|PROT_WRITE,
831                                         MAP_ANON|MAP_PRIVATE, 0, 0));
832 
833     if (mapped_addr == brk_page) {
834         /* Heap contents are initialized to zero, as for anonymous
835          * mapped pages.  Technically the new pages are already
836          * initialized to zero since they *are* anonymous mapped
837          * pages, however we have to take care with the contents that
838          * come from the remaining part of the previous page: it may
839          * contain garbage data due to a previous heap usage (grown
840          * then shrunken).  */
841         memset(g2h_untagged(target_brk), 0, brk_page - target_brk);
842 
843         target_brk = new_brk;
844         brk_page = HOST_PAGE_ALIGN(target_brk);
845         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
846             target_brk);
847         return target_brk;
848     } else if (mapped_addr != -1) {
849         /* Mapped but at wrong address, meaning there wasn't actually
850          * enough space for this brk.
851          */
852         target_munmap(mapped_addr, new_alloc_size);
853         mapped_addr = -1;
854         DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
855     } else {
857         DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
858     }
859 
860 #if defined(TARGET_ALPHA)
861     /* We (partially) emulate OSF/1 on Alpha, which requires we
862        return a proper errno, not an unchanged brk value.  */
863     return -TARGET_ENOMEM;
864 #endif
865     /* For everything else, return the previous break. */
866     return target_brk;
867 }
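/*
 * Summary of the cases above: brk(0) and any request below the original
 * break simply report the current break; a new break at or below brk_page
 * just moves target_brk (zeroing the newly exposed bytes); anything
 * larger tries to extend the reservation with an anonymous mapping at
 * brk_page and, if that mapping cannot be placed there, leaves the break
 * unchanged (returning ENOMEM on Alpha to match OSF/1 expectations).
 */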
868 
869 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
870     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
871 static inline abi_long copy_from_user_fdset(fd_set *fds,
872                                             abi_ulong target_fds_addr,
873                                             int n)
874 {
875     int i, nw, j, k;
876     abi_ulong b, *target_fds;
877 
878     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
879     if (!(target_fds = lock_user(VERIFY_READ,
880                                  target_fds_addr,
881                                  sizeof(abi_ulong) * nw,
882                                  1)))
883         return -TARGET_EFAULT;
884 
885     FD_ZERO(fds);
886     k = 0;
887     for (i = 0; i < nw; i++) {
888         /* grab the abi_ulong */
889         __get_user(b, &target_fds[i]);
890         for (j = 0; j < TARGET_ABI_BITS; j++) {
891             /* check the bit inside the abi_ulong */
892             if ((b >> j) & 1)
893                 FD_SET(k, fds);
894             k++;
895         }
896     }
897 
898     unlock_user(target_fds, target_fds_addr, 0);
899 
900     return 0;
901 }
902 
903 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
904                                                  abi_ulong target_fds_addr,
905                                                  int n)
906 {
907     if (target_fds_addr) {
908         if (copy_from_user_fdset(fds, target_fds_addr, n))
909             return -TARGET_EFAULT;
910         *fds_ptr = fds;
911     } else {
912         *fds_ptr = NULL;
913     }
914     return 0;
915 }
916 
917 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
918                                           const fd_set *fds,
919                                           int n)
920 {
921     int i, nw, j, k;
922     abi_long v;
923     abi_ulong *target_fds;
924 
925     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
926     if (!(target_fds = lock_user(VERIFY_WRITE,
927                                  target_fds_addr,
928                                  sizeof(abi_ulong) * nw,
929                                  0)))
930         return -TARGET_EFAULT;
931 
932     k = 0;
933     for (i = 0; i < nw; i++) {
934         v = 0;
935         for (j = 0; j < TARGET_ABI_BITS; j++) {
936             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
937             k++;
938         }
939         __put_user(v, &target_fds[i]);
940     }
941 
942     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
943 
944     return 0;
945 }
946 #endif
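/*
 * Worked example for the conversion above: with TARGET_ABI_BITS == 32,
 * guest fd 33 lives in target_fds[1] bit 1 (33 == 1 * 32 + 1), so
 * copy_from_user_fdset() sets FD_SET(33, fds) when that bit is set and
 * copy_to_user_fdset() writes the result back to the same position.
 */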
947 
948 #if defined(__alpha__)
949 #define HOST_HZ 1024
950 #else
951 #define HOST_HZ 100
952 #endif
953 
954 static inline abi_long host_to_target_clock_t(long ticks)
955 {
956 #if HOST_HZ == TARGET_HZ
957     return ticks;
958 #else
959     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
960 #endif
961 }
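/*
 * Example: with a 100 Hz host (HOST_HZ == 100) and a guest using 1024
 * ticks per second, 250 host ticks are reported to the guest as
 * ((int64_t)250 * 1024) / 100 == 2560 guest ticks.
 */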
962 
963 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
964                                              const struct rusage *rusage)
965 {
966     struct target_rusage *target_rusage;
967 
968     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
969         return -TARGET_EFAULT;
970     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
971     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
972     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
973     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
974     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
975     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
976     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
977     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
978     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
979     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
980     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
981     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
982     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
983     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
984     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
985     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
986     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
987     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
988     unlock_user_struct(target_rusage, target_addr, 1);
989 
990     return 0;
991 }
992 
993 #ifdef TARGET_NR_setrlimit
994 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
995 {
996     abi_ulong target_rlim_swap;
997     rlim_t result;
998 
999     target_rlim_swap = tswapal(target_rlim);
1000     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1001         return RLIM_INFINITY;
1002 
1003     result = target_rlim_swap;
1004     if (target_rlim_swap != (rlim_t)result)
1005         return RLIM_INFINITY;
1006 
1007     return result;
1008 }
1009 #endif
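/*
 * Note the round-trip check above: a target limit that does not survive
 * conversion to the host rlim_t unchanged (e.g. a wide guest value on a
 * host with a narrower rlim_t) is treated as "unlimited" rather than
 * being silently truncated.
 */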
1010 
1011 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1012 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1013 {
1014     abi_ulong target_rlim_swap;
1015     abi_ulong result;
1016 
1017     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1018         target_rlim_swap = TARGET_RLIM_INFINITY;
1019     else
1020         target_rlim_swap = rlim;
1021     result = tswapal(target_rlim_swap);
1022 
1023     return result;
1024 }
1025 #endif
1026 
1027 static inline int target_to_host_resource(int code)
1028 {
1029     switch (code) {
1030     case TARGET_RLIMIT_AS:
1031         return RLIMIT_AS;
1032     case TARGET_RLIMIT_CORE:
1033         return RLIMIT_CORE;
1034     case TARGET_RLIMIT_CPU:
1035         return RLIMIT_CPU;
1036     case TARGET_RLIMIT_DATA:
1037         return RLIMIT_DATA;
1038     case TARGET_RLIMIT_FSIZE:
1039         return RLIMIT_FSIZE;
1040     case TARGET_RLIMIT_LOCKS:
1041         return RLIMIT_LOCKS;
1042     case TARGET_RLIMIT_MEMLOCK:
1043         return RLIMIT_MEMLOCK;
1044     case TARGET_RLIMIT_MSGQUEUE:
1045         return RLIMIT_MSGQUEUE;
1046     case TARGET_RLIMIT_NICE:
1047         return RLIMIT_NICE;
1048     case TARGET_RLIMIT_NOFILE:
1049         return RLIMIT_NOFILE;
1050     case TARGET_RLIMIT_NPROC:
1051         return RLIMIT_NPROC;
1052     case TARGET_RLIMIT_RSS:
1053         return RLIMIT_RSS;
1054     case TARGET_RLIMIT_RTPRIO:
1055         return RLIMIT_RTPRIO;
1056     case TARGET_RLIMIT_RTTIME:
1057         return RLIMIT_RTTIME;
1058     case TARGET_RLIMIT_SIGPENDING:
1059         return RLIMIT_SIGPENDING;
1060     case TARGET_RLIMIT_STACK:
1061         return RLIMIT_STACK;
1062     default:
1063         return code;
1064     }
1065 }
1066 
1067 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1068                                               abi_ulong target_tv_addr)
1069 {
1070     struct target_timeval *target_tv;
1071 
1072     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1073         return -TARGET_EFAULT;
1074     }
1075 
1076     __get_user(tv->tv_sec, &target_tv->tv_sec);
1077     __get_user(tv->tv_usec, &target_tv->tv_usec);
1078 
1079     unlock_user_struct(target_tv, target_tv_addr, 0);
1080 
1081     return 0;
1082 }
1083 
1084 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1085                                             const struct timeval *tv)
1086 {
1087     struct target_timeval *target_tv;
1088 
1089     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1090         return -TARGET_EFAULT;
1091     }
1092 
1093     __put_user(tv->tv_sec, &target_tv->tv_sec);
1094     __put_user(tv->tv_usec, &target_tv->tv_usec);
1095 
1096     unlock_user_struct(target_tv, target_tv_addr, 1);
1097 
1098     return 0;
1099 }
1100 
1101 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1102 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1103                                                 abi_ulong target_tv_addr)
1104 {
1105     struct target__kernel_sock_timeval *target_tv;
1106 
1107     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1108         return -TARGET_EFAULT;
1109     }
1110 
1111     __get_user(tv->tv_sec, &target_tv->tv_sec);
1112     __get_user(tv->tv_usec, &target_tv->tv_usec);
1113 
1114     unlock_user_struct(target_tv, target_tv_addr, 0);
1115 
1116     return 0;
1117 }
1118 #endif
1119 
1120 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1121                                               const struct timeval *tv)
1122 {
1123     struct target__kernel_sock_timeval *target_tv;
1124 
1125     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1126         return -TARGET_EFAULT;
1127     }
1128 
1129     __put_user(tv->tv_sec, &target_tv->tv_sec);
1130     __put_user(tv->tv_usec, &target_tv->tv_usec);
1131 
1132     unlock_user_struct(target_tv, target_tv_addr, 1);
1133 
1134     return 0;
1135 }
1136 
1137 #if defined(TARGET_NR_futex) || \
1138     defined(TARGET_NR_rt_sigtimedwait) || \
1139     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
1140     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1141     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1142     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1143     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1144     defined(TARGET_NR_timer_settime) || \
1145     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1146 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1147                                                abi_ulong target_addr)
1148 {
1149     struct target_timespec *target_ts;
1150 
1151     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1152         return -TARGET_EFAULT;
1153     }
1154     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1155     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1156     unlock_user_struct(target_ts, target_addr, 0);
1157     return 0;
1158 }
1159 #endif
1160 
1161 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1162     defined(TARGET_NR_timer_settime64) || \
1163     defined(TARGET_NR_mq_timedsend_time64) || \
1164     defined(TARGET_NR_mq_timedreceive_time64) || \
1165     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1166     defined(TARGET_NR_clock_nanosleep_time64) || \
1167     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1168     defined(TARGET_NR_utimensat) || \
1169     defined(TARGET_NR_utimensat_time64) || \
1170     defined(TARGET_NR_semtimedop_time64) || \
1171     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1172 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1173                                                  abi_ulong target_addr)
1174 {
1175     struct target__kernel_timespec *target_ts;
1176 
1177     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1178         return -TARGET_EFAULT;
1179     }
1180     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1181     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1182     /* In 32-bit mode, this drops the padding. */
1183     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1184     unlock_user_struct(target_ts, target_addr, 0);
1185     return 0;
1186 }
1187 #endif
1188 
1189 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1190                                                struct timespec *host_ts)
1191 {
1192     struct target_timespec *target_ts;
1193 
1194     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1195         return -TARGET_EFAULT;
1196     }
1197     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1198     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1199     unlock_user_struct(target_ts, target_addr, 1);
1200     return 0;
1201 }
1202 
1203 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1204                                                  struct timespec *host_ts)
1205 {
1206     struct target__kernel_timespec *target_ts;
1207 
1208     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1209         return -TARGET_EFAULT;
1210     }
1211     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1212     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1213     unlock_user_struct(target_ts, target_addr, 1);
1214     return 0;
1215 }
1216 
1217 #if defined(TARGET_NR_gettimeofday)
1218 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1219                                              struct timezone *tz)
1220 {
1221     struct target_timezone *target_tz;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1224         return -TARGET_EFAULT;
1225     }
1226 
1227     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1228     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1229 
1230     unlock_user_struct(target_tz, target_tz_addr, 1);
1231 
1232     return 0;
1233 }
1234 #endif
1235 
1236 #if defined(TARGET_NR_settimeofday)
1237 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1238                                                abi_ulong target_tz_addr)
1239 {
1240     struct target_timezone *target_tz;
1241 
1242     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1243         return -TARGET_EFAULT;
1244     }
1245 
1246     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1247     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1248 
1249     unlock_user_struct(target_tz, target_tz_addr, 0);
1250 
1251     return 0;
1252 }
1253 #endif
1254 
1255 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1256 #include <mqueue.h>
1257 
1258 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1259                                               abi_ulong target_mq_attr_addr)
1260 {
1261     struct target_mq_attr *target_mq_attr;
1262 
1263     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1264                           target_mq_attr_addr, 1))
1265         return -TARGET_EFAULT;
1266 
1267     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1268     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1269     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1270     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1271 
1272     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1273 
1274     return 0;
1275 }
1276 
1277 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1278                                             const struct mq_attr *attr)
1279 {
1280     struct target_mq_attr *target_mq_attr;
1281 
1282     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1283                           target_mq_attr_addr, 0))
1284         return -TARGET_EFAULT;
1285 
1286     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1287     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1288     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1289     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1290 
1291     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1292 
1293     return 0;
1294 }
1295 #endif
1296 
1297 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1298 /* do_select() must return target values and target errnos. */
1299 static abi_long do_select(int n,
1300                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1301                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1302 {
1303     fd_set rfds, wfds, efds;
1304     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1305     struct timeval tv;
1306     struct timespec ts, *ts_ptr;
1307     abi_long ret;
1308 
1309     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1310     if (ret) {
1311         return ret;
1312     }
1313     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1314     if (ret) {
1315         return ret;
1316     }
1317     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1318     if (ret) {
1319         return ret;
1320     }
1321 
1322     if (target_tv_addr) {
1323         if (copy_from_user_timeval(&tv, target_tv_addr))
1324             return -TARGET_EFAULT;
1325         ts.tv_sec = tv.tv_sec;
1326         ts.tv_nsec = tv.tv_usec * 1000;
1327         ts_ptr = &ts;
1328     } else {
1329         ts_ptr = NULL;
1330     }
1331 
1332     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1333                                   ts_ptr, NULL));
1334 
1335     if (!is_error(ret)) {
1336         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1337             return -TARGET_EFAULT;
1338         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1339             return -TARGET_EFAULT;
1340         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1341             return -TARGET_EFAULT;
1342 
1343         if (target_tv_addr) {
1344             tv.tv_sec = ts.tv_sec;
1345             tv.tv_usec = ts.tv_nsec / 1000;
1346             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1347                 return -TARGET_EFAULT;
1348             }
1349         }
1350     }
1351 
1352     return ret;
1353 }
1354 
1355 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1356 static abi_long do_old_select(abi_ulong arg1)
1357 {
1358     struct target_sel_arg_struct *sel;
1359     abi_ulong inp, outp, exp, tvp;
1360     long nsel;
1361 
1362     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1363         return -TARGET_EFAULT;
1364     }
1365 
1366     nsel = tswapal(sel->n);
1367     inp = tswapal(sel->inp);
1368     outp = tswapal(sel->outp);
1369     exp = tswapal(sel->exp);
1370     tvp = tswapal(sel->tvp);
1371 
1372     unlock_user_struct(sel, arg1, 0);
1373 
1374     return do_select(nsel, inp, outp, exp, tvp);
1375 }
1376 #endif
1377 #endif
1378 
1379 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1380 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1381                             abi_long arg4, abi_long arg5, abi_long arg6,
1382                             bool time64)
1383 {
1384     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1385     fd_set rfds, wfds, efds;
1386     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1387     struct timespec ts, *ts_ptr;
1388     abi_long ret;
1389 
1390     /*
1391      * The 6th arg is actually two args smashed together,
1392      * so we cannot use the C library.
1393      */
1394     struct {
1395         sigset_t *set;
1396         size_t size;
1397     } sig, *sig_ptr;
1398 
1399     abi_ulong arg_sigset, arg_sigsize, *arg7;
1400 
1401     n = arg1;
1402     rfd_addr = arg2;
1403     wfd_addr = arg3;
1404     efd_addr = arg4;
1405     ts_addr = arg5;
1406 
1407     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1408     if (ret) {
1409         return ret;
1410     }
1411     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1412     if (ret) {
1413         return ret;
1414     }
1415     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1416     if (ret) {
1417         return ret;
1418     }
1419 
1420     /*
1421      * This takes a timespec, and not a timeval, so we cannot
1422      * use the do_select() helper ...
1423      */
1424     if (ts_addr) {
1425         if (time64) {
1426             if (target_to_host_timespec64(&ts, ts_addr)) {
1427                 return -TARGET_EFAULT;
1428             }
1429         } else {
1430             if (target_to_host_timespec(&ts, ts_addr)) {
1431                 return -TARGET_EFAULT;
1432             }
1433         }
1434         ts_ptr = &ts;
1435     } else {
1436         ts_ptr = NULL;
1437     }
1438 
1439     /* Extract the two packed args for the sigset */
1440     sig_ptr = NULL;
1441     if (arg6) {
1442         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1443         if (!arg7) {
1444             return -TARGET_EFAULT;
1445         }
1446         arg_sigset = tswapal(arg7[0]);
1447         arg_sigsize = tswapal(arg7[1]);
1448         unlock_user(arg7, arg6, 0);
1449 
1450         if (arg_sigset) {
1451             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1452             if (ret != 0) {
1453                 return ret;
1454             }
1455             sig_ptr = &sig;
1456             sig.size = SIGSET_T_SIZE;
1457         }
1458     }
1459 
1460     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1461                                   ts_ptr, sig_ptr));
1462 
1463     if (sig_ptr) {
1464         finish_sigsuspend_mask(ret);
1465     }
1466 
1467     if (!is_error(ret)) {
1468         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1469             return -TARGET_EFAULT;
1470         }
1471         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1472             return -TARGET_EFAULT;
1473         }
1474         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1475             return -TARGET_EFAULT;
1476         }
1477         if (time64) {
1478             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1479                 return -TARGET_EFAULT;
1480             }
1481         } else {
1482             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1483                 return -TARGET_EFAULT;
1484             }
1485         }
1486     }
1487     return ret;
1488 }
1489 #endif
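/*
 * For reference, the packed sixth argument unpacked above has this layout
 * on the guest side (a sketch in target terms):
 *
 *     struct {
 *         abi_ulong sigset_addr;   // guest pointer to the sigset
 *         abi_ulong sigset_size;   // size of that sigset in bytes
 *     };
 *
 * which is why arg6 is read as two consecutive abi_ulongs rather than
 * being passed straight through to the C library.
 */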
1490 
1491 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1492     defined(TARGET_NR_ppoll_time64)
1493 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1494                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1495 {
1496     struct target_pollfd *target_pfd;
1497     unsigned int nfds = arg2;
1498     struct pollfd *pfd;
1499     unsigned int i;
1500     abi_long ret;
1501 
1502     pfd = NULL;
1503     target_pfd = NULL;
1504     if (nfds) {
1505         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1506             return -TARGET_EINVAL;
1507         }
1508         target_pfd = lock_user(VERIFY_WRITE, arg1,
1509                                sizeof(struct target_pollfd) * nfds, 1);
1510         if (!target_pfd) {
1511             return -TARGET_EFAULT;
1512         }
1513 
1514         pfd = alloca(sizeof(struct pollfd) * nfds);
1515         for (i = 0; i < nfds; i++) {
1516             pfd[i].fd = tswap32(target_pfd[i].fd);
1517             pfd[i].events = tswap16(target_pfd[i].events);
1518         }
1519     }
1520     if (ppoll) {
1521         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1522         target_sigset_t *target_set;
1523         sigset_t _set, *set = &_set;
1524 
1525         if (arg3) {
1526             if (time64) {
1527                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1528                     unlock_user(target_pfd, arg1, 0);
1529                     return -TARGET_EFAULT;
1530                 }
1531             } else {
1532                 if (target_to_host_timespec(timeout_ts, arg3)) {
1533                     unlock_user(target_pfd, arg1, 0);
1534                     return -TARGET_EFAULT;
1535                 }
1536             }
1537         } else {
1538             timeout_ts = NULL;
1539         }
1540 
1541         if (arg4) {
1542             if (arg5 != sizeof(target_sigset_t)) {
1543                 unlock_user(target_pfd, arg1, 0);
1544                 return -TARGET_EINVAL;
1545             }
1546 
1547             target_set = lock_user(VERIFY_READ, arg4,
1548                                    sizeof(target_sigset_t), 1);
1549             if (!target_set) {
1550                 unlock_user(target_pfd, arg1, 0);
1551                 return -TARGET_EFAULT;
1552             }
1553             target_to_host_sigset(set, target_set);
1554         } else {
1555             set = NULL;
1556         }
1557 
1558         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1559                                    set, SIGSET_T_SIZE));
1560 
1561         if (!is_error(ret) && arg3) {
1562             if (time64) {
1563                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1564                     return -TARGET_EFAULT;
1565                 }
1566             } else {
1567                 if (host_to_target_timespec(arg3, timeout_ts)) {
1568                     return -TARGET_EFAULT;
1569                 }
1570             }
1571         }
1572         if (arg4) {
1573             unlock_user(target_set, arg4, 0);
1574         }
1575     } else {
1576         struct timespec ts, *pts;
1577 
1578         if (arg3 >= 0) {
1579             /* Convert ms to secs, ns */
1580             ts.tv_sec = arg3 / 1000;
1581             ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1582             pts = &ts;
1583         } else {
1584             /* A negative poll() timeout means "infinite" */
1585             pts = NULL;
1586         }
1587         ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1588     }
1589 
1590     if (!is_error(ret)) {
1591         for (i = 0; i < nfds; i++) {
1592             target_pfd[i].revents = tswap16(pfd[i].revents);
1593         }
1594     }
1595     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1596     return ret;
1597 }
1598 #endif
1599 
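/* Create a pipe with the given flags via the host pipe2() when available;
 * without CONFIG_PIPE2 this fails with -ENOSYS.
 */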
1600 static abi_long do_pipe2(int host_pipe[], int flags)
1601 {
1602 #ifdef CONFIG_PIPE2
1603     return pipe2(host_pipe, flags);
1604 #else
1605     return -ENOSYS;
1606 #endif
1607 }
1608 
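/* Common helper for the pipe and pipe2 syscalls: create the host pipe and
 * copy the two descriptors back to the guest, honouring the special
 * per-target return conventions of the original pipe syscall.
 */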
1609 static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
1610                         int flags, int is_pipe2)
1611 {
1612     int host_pipe[2];
1613     abi_long ret;
1614     ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
1615 
1616     if (is_error(ret))
1617         return get_errno(ret);
1618 
1619     /* Several targets have special calling conventions for the original
1620        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1621     if (!is_pipe2) {
1622 #if defined(TARGET_ALPHA)
1623         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
1624         return host_pipe[0];
1625 #elif defined(TARGET_MIPS)
1626         ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
1627         return host_pipe[0];
1628 #elif defined(TARGET_SH4)
1629         ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
1630         return host_pipe[0];
1631 #elif defined(TARGET_SPARC)
1632         ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
1633         return host_pipe[0];
1634 #endif
1635     }
1636 
1637     if (put_user_s32(host_pipe[0], pipedes)
1638         || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
1639         return -TARGET_EFAULT;
1640     return get_errno(ret);
1641 }
1642 
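/* Convert a target ip_mreq/ip_mreqn multicast request into the host
 * struct ip_mreqn.
 */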
1643 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1644                                               abi_ulong target_addr,
1645                                               socklen_t len)
1646 {
1647     struct target_ip_mreqn *target_smreqn;
1648 
1649     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1650     if (!target_smreqn)
1651         return -TARGET_EFAULT;
1652     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1653     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1654     if (len == sizeof(struct target_ip_mreqn))
1655         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1656     unlock_user(target_smreqn, target_addr, 0);
1657 
1658     return 0;
1659 }
1660 
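/* Copy a sockaddr from guest memory into host format, byte-swapping the
 * address family and the AF_NETLINK/AF_PACKET specific fields, and fixing
 * up AF_UNIX lengths that omit the trailing NUL of sun_path.
 */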
1661 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1662                                                abi_ulong target_addr,
1663                                                socklen_t len)
1664 {
1665     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1666     sa_family_t sa_family;
1667     struct target_sockaddr *target_saddr;
1668 
1669     if (fd_trans_target_to_host_addr(fd)) {
1670         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1671     }
1672 
1673     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1674     if (!target_saddr)
1675         return -TARGET_EFAULT;
1676 
1677     sa_family = tswap16(target_saddr->sa_family);
1678 
1679     /* Oops. The caller might send an incomplete sun_path; sun_path
1680      * must be terminated by \0 (see the manual page), but
1681      * unfortunately it is quite common to specify the sockaddr_un
1682      * length as "strlen(x->sun_path)" when it should be
1683      * "strlen(...) + 1". We'll fix that here if needed.
1684      * The Linux kernel applies a similar fixup.
1685      */
1686 
1687     if (sa_family == AF_UNIX) {
1688         if (len < unix_maxlen && len > 0) {
1689             char *cp = (char*)target_saddr;
1690 
1691             if (cp[len - 1] && !cp[len])
1692                 len++;
1693         }
1694         if (len > unix_maxlen)
1695             len = unix_maxlen;
1696     }
1697 
1698     memcpy(addr, target_saddr, len);
1699     addr->sa_family = sa_family;
1700     if (sa_family == AF_NETLINK) {
1701         struct sockaddr_nl *nladdr;
1702 
1703         nladdr = (struct sockaddr_nl *)addr;
1704         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1705         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1706     } else if (sa_family == AF_PACKET) {
1707         struct target_sockaddr_ll *lladdr;
1708 
1709         lladdr = (struct target_sockaddr_ll *)addr;
1710         lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1711         lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1712     }
1713     unlock_user(target_saddr, target_addr, 0);
1714 
1715     return 0;
1716 }
1717 
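/* Copy a host sockaddr back into guest memory, byte-swapping the address
 * family and the family-specific fields that need it.
 */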
1718 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1719                                                struct sockaddr *addr,
1720                                                socklen_t len)
1721 {
1722     struct target_sockaddr *target_saddr;
1723 
1724     if (len == 0) {
1725         return 0;
1726     }
1727     assert(addr);
1728 
1729     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1730     if (!target_saddr)
1731         return -TARGET_EFAULT;
1732     memcpy(target_saddr, addr, len);
1733     if (len >= offsetof(struct target_sockaddr, sa_family) +
1734         sizeof(target_saddr->sa_family)) {
1735         target_saddr->sa_family = tswap16(addr->sa_family);
1736     }
1737     if (addr->sa_family == AF_NETLINK &&
1738         len >= sizeof(struct target_sockaddr_nl)) {
1739         struct target_sockaddr_nl *target_nl =
1740                (struct target_sockaddr_nl *)target_saddr;
1741         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1742         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1743     } else if (addr->sa_family == AF_PACKET) {
1744         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1745         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1746         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1747     } else if (addr->sa_family == AF_INET6 &&
1748                len >= sizeof(struct target_sockaddr_in6)) {
1749         struct target_sockaddr_in6 *target_in6 =
1750                (struct target_sockaddr_in6 *)target_saddr;
1751         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1752     }
1753     unlock_user(target_saddr, target_addr, len);
1754 
1755     return 0;
1756 }
1757 
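/* Convert the ancillary data (control messages) of a guest msghdr into
 * host format, translating SCM_RIGHTS file descriptors and
 * SCM_CREDENTIALS payloads.
 */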
1758 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1759                                            struct target_msghdr *target_msgh)
1760 {
1761     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1762     abi_long msg_controllen;
1763     abi_ulong target_cmsg_addr;
1764     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1765     socklen_t space = 0;
1766 
1767     msg_controllen = tswapal(target_msgh->msg_controllen);
1768     if (msg_controllen < sizeof (struct target_cmsghdr))
1769         goto the_end;
1770     target_cmsg_addr = tswapal(target_msgh->msg_control);
1771     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1772     target_cmsg_start = target_cmsg;
1773     if (!target_cmsg)
1774         return -TARGET_EFAULT;
1775 
1776     while (cmsg && target_cmsg) {
1777         void *data = CMSG_DATA(cmsg);
1778         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1779 
1780         int len = tswapal(target_cmsg->cmsg_len)
1781             - sizeof(struct target_cmsghdr);
1782 
1783         space += CMSG_SPACE(len);
1784         if (space > msgh->msg_controllen) {
1785             space -= CMSG_SPACE(len);
1786             /* This is a QEMU bug, since we allocated the payload
1787              * area ourselves (unlike overflow in host-to-target
1788              * conversion, which is just the guest giving us a buffer
1789              * that's too small). It can't happen for the payload types
1790              * we currently support; if it becomes an issue in future
1791              * we would need to improve our allocation strategy to
1792              * something more intelligent than "twice the size of the
1793              * target buffer we're reading from".
1794              */
1795             qemu_log_mask(LOG_UNIMP,
1796                           ("Unsupported ancillary data %d/%d: "
1797                            "unhandled msg size\n"),
1798                           tswap32(target_cmsg->cmsg_level),
1799                           tswap32(target_cmsg->cmsg_type));
1800             break;
1801         }
1802 
1803         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1804             cmsg->cmsg_level = SOL_SOCKET;
1805         } else {
1806             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1807         }
1808         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1809         cmsg->cmsg_len = CMSG_LEN(len);
1810 
1811         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1812             int *fd = (int *)data;
1813             int *target_fd = (int *)target_data;
1814             int i, numfds = len / sizeof(int);
1815 
1816             for (i = 0; i < numfds; i++) {
1817                 __get_user(fd[i], target_fd + i);
1818             }
1819         } else if (cmsg->cmsg_level == SOL_SOCKET
1820                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1821             struct ucred *cred = (struct ucred *)data;
1822             struct target_ucred *target_cred =
1823                 (struct target_ucred *)target_data;
1824 
1825             __get_user(cred->pid, &target_cred->pid);
1826             __get_user(cred->uid, &target_cred->uid);
1827             __get_user(cred->gid, &target_cred->gid);
1828         } else {
1829             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1830                           cmsg->cmsg_level, cmsg->cmsg_type);
1831             memcpy(data, target_data, len);
1832         }
1833 
1834         cmsg = CMSG_NXTHDR(msgh, cmsg);
1835         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1836                                          target_cmsg_start);
1837     }
1838     unlock_user(target_cmsg, target_cmsg_addr, 0);
1839  the_end:
1840     msgh->msg_controllen = space;
1841     return 0;
1842 }
1843 
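/* Convert host ancillary data back into the guest msghdr, truncating the
 * payload and setting MSG_CTRUNC when the guest control buffer is too
 * small.
 */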
1844 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1845                                            struct msghdr *msgh)
1846 {
1847     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1848     abi_long msg_controllen;
1849     abi_ulong target_cmsg_addr;
1850     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1851     socklen_t space = 0;
1852 
1853     msg_controllen = tswapal(target_msgh->msg_controllen);
1854     if (msg_controllen < sizeof (struct target_cmsghdr))
1855         goto the_end;
1856     target_cmsg_addr = tswapal(target_msgh->msg_control);
1857     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1858     target_cmsg_start = target_cmsg;
1859     if (!target_cmsg)
1860         return -TARGET_EFAULT;
1861 
1862     while (cmsg && target_cmsg) {
1863         void *data = CMSG_DATA(cmsg);
1864         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1865 
1866         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1867         int tgt_len, tgt_space;
1868 
1869         /* We never copy a half-header but may copy half-data;
1870          * this is Linux's behaviour in put_cmsg(). Note that
1871          * truncation here is a guest problem (which we report
1872          * to the guest via the CTRUNC bit), unlike truncation
1873          * in target_to_host_cmsg, which is a QEMU bug.
1874          */
1875         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1876             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1877             break;
1878         }
1879 
1880         if (cmsg->cmsg_level == SOL_SOCKET) {
1881             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1882         } else {
1883             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1884         }
1885         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1886 
1887         /* Payload types which need a different size of payload on
1888          * the target must adjust tgt_len here.
1889          */
1890         tgt_len = len;
1891         switch (cmsg->cmsg_level) {
1892         case SOL_SOCKET:
1893             switch (cmsg->cmsg_type) {
1894             case SO_TIMESTAMP:
1895                 tgt_len = sizeof(struct target_timeval);
1896                 break;
1897             default:
1898                 break;
1899             }
1900             break;
1901         default:
1902             break;
1903         }
1904 
1905         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1906             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1907             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1908         }
1909 
1910         /* We must now copy-and-convert len bytes of payload
1911          * into tgt_len bytes of destination space. Bear in mind
1912          * that in both source and destination we may be dealing
1913          * with a truncated value!
1914          */
1915         switch (cmsg->cmsg_level) {
1916         case SOL_SOCKET:
1917             switch (cmsg->cmsg_type) {
1918             case SCM_RIGHTS:
1919             {
1920                 int *fd = (int *)data;
1921                 int *target_fd = (int *)target_data;
1922                 int i, numfds = tgt_len / sizeof(int);
1923 
1924                 for (i = 0; i < numfds; i++) {
1925                     __put_user(fd[i], target_fd + i);
1926                 }
1927                 break;
1928             }
1929             case SO_TIMESTAMP:
1930             {
1931                 struct timeval *tv = (struct timeval *)data;
1932                 struct target_timeval *target_tv =
1933                     (struct target_timeval *)target_data;
1934 
1935                 if (len != sizeof(struct timeval) ||
1936                     tgt_len != sizeof(struct target_timeval)) {
1937                     goto unimplemented;
1938                 }
1939 
1940                 /* copy struct timeval to target */
1941                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1942                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1943                 break;
1944             }
1945             case SCM_CREDENTIALS:
1946             {
1947                 struct ucred *cred = (struct ucred *)data;
1948                 struct target_ucred *target_cred =
1949                     (struct target_ucred *)target_data;
1950 
1951                 __put_user(cred->pid, &target_cred->pid);
1952                 __put_user(cred->uid, &target_cred->uid);
1953                 __put_user(cred->gid, &target_cred->gid);
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IP:
1962             switch (cmsg->cmsg_type) {
1963             case IP_TTL:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IP_RECVERR:
1976             {
1977                 struct errhdr_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in offender;
1980                 };
1981                 struct errhdr_t *errh = (struct errhdr_t *)data;
1982                 struct errhdr_t *target_errh =
1983                     (struct errhdr_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr_t) ||
1986                     tgt_len != sizeof(struct errhdr_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         case SOL_IPV6:
2006             switch (cmsg->cmsg_type) {
2007             case IPV6_HOPLIMIT:
2008             {
2009                 uint32_t *v = (uint32_t *)data;
2010                 uint32_t *t_int = (uint32_t *)target_data;
2011 
2012                 if (len != sizeof(uint32_t) ||
2013                     tgt_len != sizeof(uint32_t)) {
2014                     goto unimplemented;
2015                 }
2016                 __put_user(*v, t_int);
2017                 break;
2018             }
2019             case IPV6_RECVERR:
2020             {
2021                 struct errhdr6_t {
2022                    struct sock_extended_err ee;
2023                    struct sockaddr_in6 offender;
2024                 };
2025                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2026                 struct errhdr6_t *target_errh =
2027                     (struct errhdr6_t *)target_data;
2028 
2029                 if (len != sizeof(struct errhdr6_t) ||
2030                     tgt_len != sizeof(struct errhdr6_t)) {
2031                     goto unimplemented;
2032                 }
2033                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2034                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2035                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2036                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2037                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2038                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2039                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2040                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2041                     (void *) &errh->offender, sizeof(errh->offender));
2042                 break;
2043             }
2044             default:
2045                 goto unimplemented;
2046             }
2047             break;
2048 
2049         default:
2050         unimplemented:
2051             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2052                           cmsg->cmsg_level, cmsg->cmsg_type);
2053             memcpy(target_data, data, MIN(len, tgt_len));
2054             if (tgt_len > len) {
2055                 memset(target_data + len, 0, tgt_len - len);
2056             }
2057         }
2058 
2059         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2060         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2061         if (msg_controllen < tgt_space) {
2062             tgt_space = msg_controllen;
2063         }
2064         msg_controllen -= tgt_space;
2065         space += tgt_space;
2066         cmsg = CMSG_NXTHDR(msgh, cmsg);
2067         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2068                                          target_cmsg_start);
2069     }
2070     unlock_user(target_cmsg, target_cmsg_addr, space);
2071  the_end:
2072     target_msgh->msg_controllen = tswapal(space);
2073     return 0;
2074 }
2075 
2076 /* do_setsockopt() Must return target values and target errnos. */
2077 static abi_long do_setsockopt(int sockfd, int level, int optname,
2078                               abi_ulong optval_addr, socklen_t optlen)
2079 {
2080     abi_long ret;
2081     int val;
2082     struct ip_mreqn *ip_mreq;
2083     struct ip_mreq_source *ip_mreq_source;
2084 
2085     switch(level) {
2086     case SOL_TCP:
2087     case SOL_UDP:
2088         /* TCP and UDP options all take an 'int' value.  */
2089         if (optlen < sizeof(uint32_t))
2090             return -TARGET_EINVAL;
2091 
2092         if (get_user_u32(val, optval_addr))
2093             return -TARGET_EFAULT;
2094         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2095         break;
2096     case SOL_IP:
2097         switch(optname) {
2098         case IP_TOS:
2099         case IP_TTL:
2100         case IP_HDRINCL:
2101         case IP_ROUTER_ALERT:
2102         case IP_RECVOPTS:
2103         case IP_RETOPTS:
2104         case IP_PKTINFO:
2105         case IP_MTU_DISCOVER:
2106         case IP_RECVERR:
2107         case IP_RECVTTL:
2108         case IP_RECVTOS:
2109 #ifdef IP_FREEBIND
2110         case IP_FREEBIND:
2111 #endif
2112         case IP_MULTICAST_TTL:
2113         case IP_MULTICAST_LOOP:
2114             val = 0;
2115             if (optlen >= sizeof(uint32_t)) {
2116                 if (get_user_u32(val, optval_addr))
2117                     return -TARGET_EFAULT;
2118             } else if (optlen >= 1) {
2119                 if (get_user_u8(val, optval_addr))
2120                     return -TARGET_EFAULT;
2121             }
2122             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2123             break;
2124         case IP_ADD_MEMBERSHIP:
2125         case IP_DROP_MEMBERSHIP:
2126             if (optlen < sizeof (struct target_ip_mreq) ||
2127                 optlen > sizeof (struct target_ip_mreqn))
2128                 return -TARGET_EINVAL;
2129 
2130             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2131             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2132             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2133             break;
2134 
2135         case IP_BLOCK_SOURCE:
2136         case IP_UNBLOCK_SOURCE:
2137         case IP_ADD_SOURCE_MEMBERSHIP:
2138         case IP_DROP_SOURCE_MEMBERSHIP:
2139             if (optlen != sizeof (struct target_ip_mreq_source))
2140                 return -TARGET_EINVAL;
2141 
2142             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2143             if (!ip_mreq_source) {
2144                 return -TARGET_EFAULT;
2145             }
2146             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2147             unlock_user(ip_mreq_source, optval_addr, 0);
2148             break;
2149 
2150         default:
2151             goto unimplemented;
2152         }
2153         break;
2154     case SOL_IPV6:
2155         switch (optname) {
2156         case IPV6_MTU_DISCOVER:
2157         case IPV6_MTU:
2158         case IPV6_V6ONLY:
2159         case IPV6_RECVPKTINFO:
2160         case IPV6_UNICAST_HOPS:
2161         case IPV6_MULTICAST_HOPS:
2162         case IPV6_MULTICAST_LOOP:
2163         case IPV6_RECVERR:
2164         case IPV6_RECVHOPLIMIT:
2165         case IPV6_2292HOPLIMIT:
2166         case IPV6_CHECKSUM:
2167         case IPV6_ADDRFORM:
2168         case IPV6_2292PKTINFO:
2169         case IPV6_RECVTCLASS:
2170         case IPV6_RECVRTHDR:
2171         case IPV6_2292RTHDR:
2172         case IPV6_RECVHOPOPTS:
2173         case IPV6_2292HOPOPTS:
2174         case IPV6_RECVDSTOPTS:
2175         case IPV6_2292DSTOPTS:
2176         case IPV6_TCLASS:
2177         case IPV6_ADDR_PREFERENCES:
2178 #ifdef IPV6_RECVPATHMTU
2179         case IPV6_RECVPATHMTU:
2180 #endif
2181 #ifdef IPV6_TRANSPARENT
2182         case IPV6_TRANSPARENT:
2183 #endif
2184 #ifdef IPV6_FREEBIND
2185         case IPV6_FREEBIND:
2186 #endif
2187 #ifdef IPV6_RECVORIGDSTADDR
2188         case IPV6_RECVORIGDSTADDR:
2189 #endif
2190             val = 0;
2191             if (optlen < sizeof(uint32_t)) {
2192                 return -TARGET_EINVAL;
2193             }
2194             if (get_user_u32(val, optval_addr)) {
2195                 return -TARGET_EFAULT;
2196             }
2197             ret = get_errno(setsockopt(sockfd, level, optname,
2198                                        &val, sizeof(val)));
2199             break;
2200         case IPV6_PKTINFO:
2201         {
2202             struct in6_pktinfo pki;
2203 
2204             if (optlen < sizeof(pki)) {
2205                 return -TARGET_EINVAL;
2206             }
2207 
2208             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2209                 return -TARGET_EFAULT;
2210             }
2211 
2212             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2213 
2214             ret = get_errno(setsockopt(sockfd, level, optname,
2215                                        &pki, sizeof(pki)));
2216             break;
2217         }
2218         case IPV6_ADD_MEMBERSHIP:
2219         case IPV6_DROP_MEMBERSHIP:
2220         {
2221             struct ipv6_mreq ipv6mreq;
2222 
2223             if (optlen < sizeof(ipv6mreq)) {
2224                 return -TARGET_EINVAL;
2225             }
2226 
2227             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2228                 return -TARGET_EFAULT;
2229             }
2230 
2231             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2232 
2233             ret = get_errno(setsockopt(sockfd, level, optname,
2234                                        &ipv6mreq, sizeof(ipv6mreq)));
2235             break;
2236         }
2237         default:
2238             goto unimplemented;
2239         }
2240         break;
2241     case SOL_ICMPV6:
2242         switch (optname) {
2243         case ICMPV6_FILTER:
2244         {
2245             struct icmp6_filter icmp6f;
2246 
2247             if (optlen > sizeof(icmp6f)) {
2248                 optlen = sizeof(icmp6f);
2249             }
2250 
2251             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2252                 return -TARGET_EFAULT;
2253             }
2254 
2255             for (val = 0; val < 8; val++) {
2256                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2257             }
2258 
2259             ret = get_errno(setsockopt(sockfd, level, optname,
2260                                        &icmp6f, optlen));
2261             break;
2262         }
2263         default:
2264             goto unimplemented;
2265         }
2266         break;
2267     case SOL_RAW:
2268         switch (optname) {
2269         case ICMP_FILTER:
2270         case IPV6_CHECKSUM:
2271             /* these take a u32 value */
2272             if (optlen < sizeof(uint32_t)) {
2273                 return -TARGET_EINVAL;
2274             }
2275 
2276             if (get_user_u32(val, optval_addr)) {
2277                 return -TARGET_EFAULT;
2278             }
2279             ret = get_errno(setsockopt(sockfd, level, optname,
2280                                        &val, sizeof(val)));
2281             break;
2282 
2283         default:
2284             goto unimplemented;
2285         }
2286         break;
2287 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2288     case SOL_ALG:
2289         switch (optname) {
2290         case ALG_SET_KEY:
2291         {
2292             char *alg_key = g_malloc(optlen);
2293 
2294             if (!alg_key) {
2295                 return -TARGET_ENOMEM;
2296             }
2297             if (copy_from_user(alg_key, optval_addr, optlen)) {
2298                 g_free(alg_key);
2299                 return -TARGET_EFAULT;
2300             }
2301             ret = get_errno(setsockopt(sockfd, level, optname,
2302                                        alg_key, optlen));
2303             g_free(alg_key);
2304             break;
2305         }
2306         case ALG_SET_AEAD_AUTHSIZE:
2307         {
2308             ret = get_errno(setsockopt(sockfd, level, optname,
2309                                        NULL, optlen));
2310             break;
2311         }
2312         default:
2313             goto unimplemented;
2314         }
2315         break;
2316 #endif
2317     case TARGET_SOL_SOCKET:
2318         switch (optname) {
2319         case TARGET_SO_RCVTIMEO:
2320         {
2321                 struct timeval tv;
2322 
2323                 optname = SO_RCVTIMEO;
2324 
2325 set_timeout:
2326                 if (optlen != sizeof(struct target_timeval)) {
2327                     return -TARGET_EINVAL;
2328                 }
2329 
2330                 if (copy_from_user_timeval(&tv, optval_addr)) {
2331                     return -TARGET_EFAULT;
2332                 }
2333 
2334                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2335                                 &tv, sizeof(tv)));
2336                 return ret;
2337         }
2338         case TARGET_SO_SNDTIMEO:
2339                 optname = SO_SNDTIMEO;
2340                 goto set_timeout;
2341         case TARGET_SO_ATTACH_FILTER:
2342         {
2343                 struct target_sock_fprog *tfprog;
2344                 struct target_sock_filter *tfilter;
2345                 struct sock_fprog fprog;
2346                 struct sock_filter *filter;
2347                 int i;
2348 
2349                 if (optlen != sizeof(*tfprog)) {
2350                     return -TARGET_EINVAL;
2351                 }
2352                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2353                     return -TARGET_EFAULT;
2354                 }
2355                 if (!lock_user_struct(VERIFY_READ, tfilter,
2356                                       tswapal(tfprog->filter), 0)) {
2357                     unlock_user_struct(tfprog, optval_addr, 1);
2358                     return -TARGET_EFAULT;
2359                 }
2360 
2361                 fprog.len = tswap16(tfprog->len);
2362                 filter = g_try_new(struct sock_filter, fprog.len);
2363                 if (filter == NULL) {
2364                     unlock_user_struct(tfilter, tfprog->filter, 1);
2365                     unlock_user_struct(tfprog, optval_addr, 1);
2366                     return -TARGET_ENOMEM;
2367                 }
2368                 for (i = 0; i < fprog.len; i++) {
2369                     filter[i].code = tswap16(tfilter[i].code);
2370                     filter[i].jt = tfilter[i].jt;
2371                     filter[i].jf = tfilter[i].jf;
2372                     filter[i].k = tswap32(tfilter[i].k);
2373                 }
2374                 fprog.filter = filter;
2375 
2376                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2377                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2378                 g_free(filter);
2379 
2380                 unlock_user_struct(tfilter, tfprog->filter, 1);
2381                 unlock_user_struct(tfprog, optval_addr, 1);
2382                 return ret;
2383         }
2384         case TARGET_SO_BINDTODEVICE:
2385         {
2386                 char *dev_ifname, *addr_ifname;
2387 
2388                 if (optlen > IFNAMSIZ - 1) {
2389                     optlen = IFNAMSIZ - 1;
2390                 }
2391                 dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2392                 if (!dev_ifname) {
2393                     return -TARGET_EFAULT;
2394                 }
2395                 optname = SO_BINDTODEVICE;
2396                 addr_ifname = alloca(IFNAMSIZ);
2397                 memcpy(addr_ifname, dev_ifname, optlen);
2398                 addr_ifname[optlen] = 0;
2399                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2400                                            addr_ifname, optlen));
2401                 unlock_user(dev_ifname, optval_addr, 0);
2402                 return ret;
2403         }
2404         case TARGET_SO_LINGER:
2405         {
2406                 struct linger lg;
2407                 struct target_linger *tlg;
2408 
2409                 if (optlen != sizeof(struct target_linger)) {
2410                     return -TARGET_EINVAL;
2411                 }
2412                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2413                     return -TARGET_EFAULT;
2414                 }
2415                 __get_user(lg.l_onoff, &tlg->l_onoff);
2416                 __get_user(lg.l_linger, &tlg->l_linger);
2417                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2418                                 &lg, sizeof(lg)));
2419                 unlock_user_struct(tlg, optval_addr, 0);
2420                 return ret;
2421         }
2422             /* Options with 'int' argument.  */
2423         case TARGET_SO_DEBUG:
2424                 optname = SO_DEBUG;
2425                 break;
2426         case TARGET_SO_REUSEADDR:
2427                 optname = SO_REUSEADDR;
2428                 break;
2429 #ifdef SO_REUSEPORT
2430         case TARGET_SO_REUSEPORT:
2431                 optname = SO_REUSEPORT;
2432                 break;
2433 #endif
2434         case TARGET_SO_TYPE:
2435                 optname = SO_TYPE;
2436                 break;
2437         case TARGET_SO_ERROR:
2438                 optname = SO_ERROR;
2439                 break;
2440         case TARGET_SO_DONTROUTE:
2441                 optname = SO_DONTROUTE;
2442                 break;
2443         case TARGET_SO_BROADCAST:
2444                 optname = SO_BROADCAST;
2445                 break;
2446         case TARGET_SO_SNDBUF:
2447                 optname = SO_SNDBUF;
2448                 break;
2449         case TARGET_SO_SNDBUFFORCE:
2450                 optname = SO_SNDBUFFORCE;
2451                 break;
2452         case TARGET_SO_RCVBUF:
2453                 optname = SO_RCVBUF;
2454                 break;
2455         case TARGET_SO_RCVBUFFORCE:
2456                 optname = SO_RCVBUFFORCE;
2457                 break;
2458         case TARGET_SO_KEEPALIVE:
2459                 optname = SO_KEEPALIVE;
2460                 break;
2461         case TARGET_SO_OOBINLINE:
2462                 optname = SO_OOBINLINE;
2463                 break;
2464         case TARGET_SO_NO_CHECK:
2465                 optname = SO_NO_CHECK;
2466                 break;
2467         case TARGET_SO_PRIORITY:
2468                 optname = SO_PRIORITY;
2469                 break;
2470 #ifdef SO_BSDCOMPAT
2471         case TARGET_SO_BSDCOMPAT:
2472                 optname = SO_BSDCOMPAT;
2473                 break;
2474 #endif
2475         case TARGET_SO_PASSCRED:
2476                 optname = SO_PASSCRED;
2477                 break;
2478         case TARGET_SO_PASSSEC:
2479                 optname = SO_PASSSEC;
2480                 break;
2481         case TARGET_SO_TIMESTAMP:
2482                 optname = SO_TIMESTAMP;
2483                 break;
2484         case TARGET_SO_RCVLOWAT:
2485                 optname = SO_RCVLOWAT;
2486                 break;
2487         default:
2488             goto unimplemented;
2489         }
2490         if (optlen < sizeof(uint32_t))
2491             return -TARGET_EINVAL;
2492 
2493         if (get_user_u32(val, optval_addr))
2494             return -TARGET_EFAULT;
2495         ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2496         break;
2497 #ifdef SOL_NETLINK
2498     case SOL_NETLINK:
2499         switch (optname) {
2500         case NETLINK_PKTINFO:
2501         case NETLINK_ADD_MEMBERSHIP:
2502         case NETLINK_DROP_MEMBERSHIP:
2503         case NETLINK_BROADCAST_ERROR:
2504         case NETLINK_NO_ENOBUFS:
2505 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2506         case NETLINK_LISTEN_ALL_NSID:
2507         case NETLINK_CAP_ACK:
2508 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2509 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2510         case NETLINK_EXT_ACK:
2511 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2512 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2513         case NETLINK_GET_STRICT_CHK:
2514 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2515             break;
2516         default:
2517             goto unimplemented;
2518         }
2519         val = 0;
2520         if (optlen < sizeof(uint32_t)) {
2521             return -TARGET_EINVAL;
2522         }
2523         if (get_user_u32(val, optval_addr)) {
2524             return -TARGET_EFAULT;
2525         }
2526         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2527                                    sizeof(val)));
2528         break;
2529 #endif /* SOL_NETLINK */
2530     default:
2531     unimplemented:
2532         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2533                       level, optname);
2534         ret = -TARGET_ENOPROTOOPT;
2535     }
2536     return ret;
2537 }
2538 
2539 /* do_getsockopt() Must return target values and target errnos. */
2540 static abi_long do_getsockopt(int sockfd, int level, int optname,
2541                               abi_ulong optval_addr, abi_ulong optlen)
2542 {
2543     abi_long ret;
2544     int len, val;
2545     socklen_t lv;
2546 
2547     switch(level) {
2548     case TARGET_SOL_SOCKET:
2549         level = SOL_SOCKET;
2550         switch (optname) {
2551         /* These don't just return a single integer */
2552         case TARGET_SO_PEERNAME:
2553             goto unimplemented;
2554         case TARGET_SO_RCVTIMEO: {
2555             struct timeval tv;
2556             socklen_t tvlen;
2557 
2558             optname = SO_RCVTIMEO;
2559 
2560 get_timeout:
2561             if (get_user_u32(len, optlen)) {
2562                 return -TARGET_EFAULT;
2563             }
2564             if (len < 0) {
2565                 return -TARGET_EINVAL;
2566             }
2567 
2568             tvlen = sizeof(tv);
2569             ret = get_errno(getsockopt(sockfd, level, optname,
2570                                        &tv, &tvlen));
2571             if (ret < 0) {
2572                 return ret;
2573             }
2574             if (len > sizeof(struct target_timeval)) {
2575                 len = sizeof(struct target_timeval);
2576             }
2577             if (copy_to_user_timeval(optval_addr, &tv)) {
2578                 return -TARGET_EFAULT;
2579             }
2580             if (put_user_u32(len, optlen)) {
2581                 return -TARGET_EFAULT;
2582             }
2583             break;
2584         }
2585         case TARGET_SO_SNDTIMEO:
2586             optname = SO_SNDTIMEO;
2587             goto get_timeout;
2588         case TARGET_SO_PEERCRED: {
2589             struct ucred cr;
2590             socklen_t crlen;
2591             struct target_ucred *tcr;
2592 
2593             if (get_user_u32(len, optlen)) {
2594                 return -TARGET_EFAULT;
2595             }
2596             if (len < 0) {
2597                 return -TARGET_EINVAL;
2598             }
2599 
2600             crlen = sizeof(cr);
2601             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2602                                        &cr, &crlen));
2603             if (ret < 0) {
2604                 return ret;
2605             }
2606             if (len > crlen) {
2607                 len = crlen;
2608             }
2609             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2610                 return -TARGET_EFAULT;
2611             }
2612             __put_user(cr.pid, &tcr->pid);
2613             __put_user(cr.uid, &tcr->uid);
2614             __put_user(cr.gid, &tcr->gid);
2615             unlock_user_struct(tcr, optval_addr, 1);
2616             if (put_user_u32(len, optlen)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             break;
2620         }
2621         case TARGET_SO_PEERSEC: {
2622             char *name;
2623 
2624             if (get_user_u32(len, optlen)) {
2625                 return -TARGET_EFAULT;
2626             }
2627             if (len < 0) {
2628                 return -TARGET_EINVAL;
2629             }
2630             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2631             if (!name) {
2632                 return -TARGET_EFAULT;
2633             }
2634             lv = len;
2635             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2636                                        name, &lv));
2637             if (put_user_u32(lv, optlen)) {
2638                 ret = -TARGET_EFAULT;
2639             }
2640             unlock_user(name, optval_addr, lv);
2641             break;
2642         }
2643         case TARGET_SO_LINGER:
2644         {
2645             struct linger lg;
2646             socklen_t lglen;
2647             struct target_linger *tlg;
2648 
2649             if (get_user_u32(len, optlen)) {
2650                 return -TARGET_EFAULT;
2651             }
2652             if (len < 0) {
2653                 return -TARGET_EINVAL;
2654             }
2655 
2656             lglen = sizeof(lg);
2657             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2658                                        &lg, &lglen));
2659             if (ret < 0) {
2660                 return ret;
2661             }
2662             if (len > lglen) {
2663                 len = lglen;
2664             }
2665             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2666                 return -TARGET_EFAULT;
2667             }
2668             __put_user(lg.l_onoff, &tlg->l_onoff);
2669             __put_user(lg.l_linger, &tlg->l_linger);
2670             unlock_user_struct(tlg, optval_addr, 1);
2671             if (put_user_u32(len, optlen)) {
2672                 return -TARGET_EFAULT;
2673             }
2674             break;
2675         }
2676         /* Options with 'int' argument.  */
2677         case TARGET_SO_DEBUG:
2678             optname = SO_DEBUG;
2679             goto int_case;
2680         case TARGET_SO_REUSEADDR:
2681             optname = SO_REUSEADDR;
2682             goto int_case;
2683 #ifdef SO_REUSEPORT
2684         case TARGET_SO_REUSEPORT:
2685             optname = SO_REUSEPORT;
2686             goto int_case;
2687 #endif
2688         case TARGET_SO_TYPE:
2689             optname = SO_TYPE;
2690             goto int_case;
2691         case TARGET_SO_ERROR:
2692             optname = SO_ERROR;
2693             goto int_case;
2694         case TARGET_SO_DONTROUTE:
2695             optname = SO_DONTROUTE;
2696             goto int_case;
2697         case TARGET_SO_BROADCAST:
2698             optname = SO_BROADCAST;
2699             goto int_case;
2700         case TARGET_SO_SNDBUF:
2701             optname = SO_SNDBUF;
2702             goto int_case;
2703         case TARGET_SO_RCVBUF:
2704             optname = SO_RCVBUF;
2705             goto int_case;
2706         case TARGET_SO_KEEPALIVE:
2707             optname = SO_KEEPALIVE;
2708             goto int_case;
2709         case TARGET_SO_OOBINLINE:
2710             optname = SO_OOBINLINE;
2711             goto int_case;
2712         case TARGET_SO_NO_CHECK:
2713             optname = SO_NO_CHECK;
2714             goto int_case;
2715         case TARGET_SO_PRIORITY:
2716             optname = SO_PRIORITY;
2717             goto int_case;
2718 #ifdef SO_BSDCOMPAT
2719         case TARGET_SO_BSDCOMPAT:
2720             optname = SO_BSDCOMPAT;
2721             goto int_case;
2722 #endif
2723         case TARGET_SO_PASSCRED:
2724             optname = SO_PASSCRED;
2725             goto int_case;
2726         case TARGET_SO_TIMESTAMP:
2727             optname = SO_TIMESTAMP;
2728             goto int_case;
2729         case TARGET_SO_RCVLOWAT:
2730             optname = SO_RCVLOWAT;
2731             goto int_case;
2732         case TARGET_SO_ACCEPTCONN:
2733             optname = SO_ACCEPTCONN;
2734             goto int_case;
2735         case TARGET_SO_PROTOCOL:
2736             optname = SO_PROTOCOL;
2737             goto int_case;
2738         case TARGET_SO_DOMAIN:
2739             optname = SO_DOMAIN;
2740             goto int_case;
2741         default:
2742             goto int_case;
2743         }
2744         break;
2745     case SOL_TCP:
2746     case SOL_UDP:
2747         /* TCP and UDP options all take an 'int' value.  */
2748     int_case:
2749         if (get_user_u32(len, optlen))
2750             return -TARGET_EFAULT;
2751         if (len < 0)
2752             return -TARGET_EINVAL;
2753         lv = sizeof(lv);
2754         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2755         if (ret < 0)
2756             return ret;
2757         if (optname == SO_TYPE) {
2758             val = host_to_target_sock_type(val);
2759         }
2760         if (len > lv)
2761             len = lv;
2762         if (len == 4) {
2763             if (put_user_u32(val, optval_addr))
2764                 return -TARGET_EFAULT;
2765         } else {
2766             if (put_user_u8(val, optval_addr))
2767                 return -TARGET_EFAULT;
2768         }
2769         if (put_user_u32(len, optlen))
2770             return -TARGET_EFAULT;
2771         break;
2772     case SOL_IP:
2773         switch(optname) {
2774         case IP_TOS:
2775         case IP_TTL:
2776         case IP_HDRINCL:
2777         case IP_ROUTER_ALERT:
2778         case IP_RECVOPTS:
2779         case IP_RETOPTS:
2780         case IP_PKTINFO:
2781         case IP_MTU_DISCOVER:
2782         case IP_RECVERR:
2783         case IP_RECVTOS:
2784 #ifdef IP_FREEBIND
2785         case IP_FREEBIND:
2786 #endif
2787         case IP_MULTICAST_TTL:
2788         case IP_MULTICAST_LOOP:
2789             if (get_user_u32(len, optlen))
2790                 return -TARGET_EFAULT;
2791             if (len < 0)
2792                 return -TARGET_EINVAL;
2793             lv = sizeof(lv);
2794             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2795             if (ret < 0)
2796                 return ret;
2797             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2798                 len = 1;
2799                 if (put_user_u32(len, optlen)
2800                     || put_user_u8(val, optval_addr))
2801                     return -TARGET_EFAULT;
2802             } else {
2803                 if (len > sizeof(int))
2804                     len = sizeof(int);
2805                 if (put_user_u32(len, optlen)
2806                     || put_user_u32(val, optval_addr))
2807                     return -TARGET_EFAULT;
2808             }
2809             break;
2810         default:
2811             ret = -TARGET_ENOPROTOOPT;
2812             break;
2813         }
2814         break;
2815     case SOL_IPV6:
2816         switch (optname) {
2817         case IPV6_MTU_DISCOVER:
2818         case IPV6_MTU:
2819         case IPV6_V6ONLY:
2820         case IPV6_RECVPKTINFO:
2821         case IPV6_UNICAST_HOPS:
2822         case IPV6_MULTICAST_HOPS:
2823         case IPV6_MULTICAST_LOOP:
2824         case IPV6_RECVERR:
2825         case IPV6_RECVHOPLIMIT:
2826         case IPV6_2292HOPLIMIT:
2827         case IPV6_CHECKSUM:
2828         case IPV6_ADDRFORM:
2829         case IPV6_2292PKTINFO:
2830         case IPV6_RECVTCLASS:
2831         case IPV6_RECVRTHDR:
2832         case IPV6_2292RTHDR:
2833         case IPV6_RECVHOPOPTS:
2834         case IPV6_2292HOPOPTS:
2835         case IPV6_RECVDSTOPTS:
2836         case IPV6_2292DSTOPTS:
2837         case IPV6_TCLASS:
2838         case IPV6_ADDR_PREFERENCES:
2839 #ifdef IPV6_RECVPATHMTU
2840         case IPV6_RECVPATHMTU:
2841 #endif
2842 #ifdef IPV6_TRANSPARENT
2843         case IPV6_TRANSPARENT:
2844 #endif
2845 #ifdef IPV6_FREEBIND
2846         case IPV6_FREEBIND:
2847 #endif
2848 #ifdef IPV6_RECVORIGDSTADDR
2849         case IPV6_RECVORIGDSTADDR:
2850 #endif
2851             if (get_user_u32(len, optlen))
2852                 return -TARGET_EFAULT;
2853             if (len < 0)
2854                 return -TARGET_EINVAL;
2855             lv = sizeof(lv);
2856             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2857             if (ret < 0)
2858                 return ret;
2859             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2860                 len = 1;
2861                 if (put_user_u32(len, optlen)
2862                     || put_user_u8(val, optval_addr))
2863                     return -TARGET_EFAULT;
2864             } else {
2865                 if (len > sizeof(int))
2866                     len = sizeof(int);
2867                 if (put_user_u32(len, optlen)
2868                     || put_user_u32(val, optval_addr))
2869                     return -TARGET_EFAULT;
2870             }
2871             break;
2872         default:
2873             ret = -TARGET_ENOPROTOOPT;
2874             break;
2875         }
2876         break;
2877 #ifdef SOL_NETLINK
2878     case SOL_NETLINK:
2879         switch (optname) {
2880         case NETLINK_PKTINFO:
2881         case NETLINK_BROADCAST_ERROR:
2882         case NETLINK_NO_ENOBUFS:
2883 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2884         case NETLINK_LISTEN_ALL_NSID:
2885         case NETLINK_CAP_ACK:
2886 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2887 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2888         case NETLINK_EXT_ACK:
2889 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2890 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2891         case NETLINK_GET_STRICT_CHK:
2892 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
2893             if (get_user_u32(len, optlen)) {
2894                 return -TARGET_EFAULT;
2895             }
2896             if (len != sizeof(val)) {
2897                 return -TARGET_EINVAL;
2898             }
2899             lv = len;
2900             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2901             if (ret < 0) {
2902                 return ret;
2903             }
2904             if (put_user_u32(lv, optlen)
2905                 || put_user_u32(val, optval_addr)) {
2906                 return -TARGET_EFAULT;
2907             }
2908             break;
2909 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2910         case NETLINK_LIST_MEMBERSHIPS:
2911         {
2912             uint32_t *results;
2913             int i;
2914             if (get_user_u32(len, optlen)) {
2915                 return -TARGET_EFAULT;
2916             }
2917             if (len < 0) {
2918                 return -TARGET_EINVAL;
2919             }
2920             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2921             if (!results && len > 0) {
2922                 return -TARGET_EFAULT;
2923             }
2924             lv = len;
2925             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2926             if (ret < 0) {
2927                 unlock_user(results, optval_addr, 0);
2928                 return ret;
2929             }
2930             /* Swap host endianness to target endianness. */
2931             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2932                 results[i] = tswap32(results[i]);
2933             }
2934             if (put_user_u32(lv, optlen)) {
2935                 return -TARGET_EFAULT;
2936             }
2937             unlock_user(results, optval_addr, 0);
2938             break;
2939         }
2940 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2941         default:
2942             goto unimplemented;
2943         }
2944         break;
2945 #endif /* SOL_NETLINK */
2946     default:
2947     unimplemented:
2948         qemu_log_mask(LOG_UNIMP,
2949                       "getsockopt level=%d optname=%d not yet supported\n",
2950                       level, optname);
2951         ret = -TARGET_EOPNOTSUPP;
2952         break;
2953     }
2954     return ret;
2955 }
2956 
2957 /* Convert a target low/high pair representing a file offset into the host
2958  * low/high pair. This function doesn't handle offsets bigger than 64 bits,
2959  * as the kernel doesn't handle them either.
2960  */
2961 static void target_to_host_low_high(abi_ulong tlow,
2962                                     abi_ulong thigh,
2963                                     unsigned long *hlow,
2964                                     unsigned long *hhigh)
2965 {
2966     uint64_t off = tlow |
2967         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2968         TARGET_LONG_BITS / 2;
2969 
2970     *hlow = off;
2971     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2972 }
2973 
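/* Lock a guest iovec array into host memory as an array of struct iovec.
 * On failure NULL is returned and errno is set; on success the result must
 * be released with unlock_iovec().
 */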
2974 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2975                                 abi_ulong count, int copy)
2976 {
2977     struct target_iovec *target_vec;
2978     struct iovec *vec;
2979     abi_ulong total_len, max_len;
2980     int i;
2981     int err = 0;
2982     bool bad_address = false;
2983 
2984     if (count == 0) {
2985         errno = 0;
2986         return NULL;
2987     }
2988     if (count > IOV_MAX) {
2989         errno = EINVAL;
2990         return NULL;
2991     }
2992 
2993     vec = g_try_new0(struct iovec, count);
2994     if (vec == NULL) {
2995         errno = ENOMEM;
2996         return NULL;
2997     }
2998 
2999     target_vec = lock_user(VERIFY_READ, target_addr,
3000                            count * sizeof(struct target_iovec), 1);
3001     if (target_vec == NULL) {
3002         err = EFAULT;
3003         goto fail2;
3004     }
3005 
3006     /* ??? If host page size > target page size, this will result in a
3007        value larger than what we can actually support.  */
3008     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3009     total_len = 0;
3010 
3011     for (i = 0; i < count; i++) {
3012         abi_ulong base = tswapal(target_vec[i].iov_base);
3013         abi_long len = tswapal(target_vec[i].iov_len);
3014 
3015         if (len < 0) {
3016             err = EINVAL;
3017             goto fail;
3018         } else if (len == 0) {
3019             /* Zero length pointer is ignored.  */
3020             vec[i].iov_base = 0;
3021         } else {
3022             vec[i].iov_base = lock_user(type, base, len, copy);
3023             /* If the first buffer pointer is bad, this is a fault.  But
3024              * subsequent bad buffers will result in a partial write; this
3025              * is realized by filling the vector with null pointers and
3026              * zero lengths. */
3027             if (!vec[i].iov_base) {
3028                 if (i == 0) {
3029                     err = EFAULT;
3030                     goto fail;
3031                 } else {
3032                     bad_address = true;
3033                 }
3034             }
3035             if (bad_address) {
3036                 len = 0;
3037             }
3038             if (len > max_len - total_len) {
3039                 len = max_len - total_len;
3040             }
3041         }
3042         vec[i].iov_len = len;
3043         total_len += len;
3044     }
3045 
3046     unlock_user(target_vec, target_addr, 0);
3047     return vec;
3048 
3049  fail:
3050     while (--i >= 0) {
3051         if (tswapal(target_vec[i].iov_len) > 0) {
3052             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3053         }
3054     }
3055     unlock_user(target_vec, target_addr, 0);
3056  fail2:
3057     g_free(vec);
3058     errno = err;
3059     return NULL;
3060 }
3061 
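/* Release an iovec obtained from lock_iovec(), copying the data back to
 * the guest when 'copy' is set, and free the host array.
 */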
3062 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3063                          abi_ulong count, int copy)
3064 {
3065     struct target_iovec *target_vec;
3066     int i;
3067 
3068     target_vec = lock_user(VERIFY_READ, target_addr,
3069                            count * sizeof(struct target_iovec), 1);
3070     if (target_vec) {
3071         for (i = 0; i < count; i++) {
3072             abi_ulong base = tswapal(target_vec[i].iov_base);
3073             abi_long len = tswapal(target_vec[i].iov_len);
3074             if (len < 0) {
3075                 break;
3076             }
3077             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3078         }
3079         unlock_user(target_vec, target_addr, 0);
3080     }
3081 
3082     g_free(vec);
3083 }
3084 
3085 static inline int target_to_host_sock_type(int *type)
3086 {
3087     int host_type = 0;
3088     int target_type = *type;
3089 
3090     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3091     case TARGET_SOCK_DGRAM:
3092         host_type = SOCK_DGRAM;
3093         break;
3094     case TARGET_SOCK_STREAM:
3095         host_type = SOCK_STREAM;
3096         break;
3097     default:
3098         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3099         break;
3100     }
3101     if (target_type & TARGET_SOCK_CLOEXEC) {
3102 #if defined(SOCK_CLOEXEC)
3103         host_type |= SOCK_CLOEXEC;
3104 #else
3105         return -TARGET_EINVAL;
3106 #endif
3107     }
3108     if (target_type & TARGET_SOCK_NONBLOCK) {
3109 #if defined(SOCK_NONBLOCK)
3110         host_type |= SOCK_NONBLOCK;
3111 #elif !defined(O_NONBLOCK)
3112         return -TARGET_EINVAL;
3113 #endif
3114     }
3115     *type = host_type;
3116     return 0;
3117 }
3118 
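/*
 * target_to_host_sock_type() above only rejects TARGET_SOCK_NONBLOCK when
 * the host has neither SOCK_NONBLOCK nor O_NONBLOCK; when only O_NONBLOCK
 * is available, the flag is applied after socket creation by
 * sock_flags_fixup() below.
 */
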
3119 /* Try to emulate socket type flags after socket creation.  */
3120 static int sock_flags_fixup(int fd, int target_type)
3121 {
3122 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3123     if (target_type & TARGET_SOCK_NONBLOCK) {
3124         int flags = fcntl(fd, F_GETFL);
3125         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3126             close(fd);
3127             return -TARGET_EINVAL;
3128         }
3129     }
3130 #endif
3131     return fd;
3132 }
3133 
3134 /* do_socket() Must return target values and target errnos. */
3135 static abi_long do_socket(int domain, int type, int protocol)
3136 {
3137     int target_type = type;
3138     int ret;
3139 
3140     ret = target_to_host_sock_type(&type);
3141     if (ret) {
3142         return ret;
3143     }
3144 
3145     if (domain == PF_NETLINK && !(
3146 #ifdef CONFIG_RTNETLINK
3147          protocol == NETLINK_ROUTE ||
3148 #endif
3149          protocol == NETLINK_KOBJECT_UEVENT ||
3150          protocol == NETLINK_AUDIT)) {
3151         return -TARGET_EPROTONOSUPPORT;
3152     }
3153 
3154     if (domain == AF_PACKET ||
3155         (domain == AF_INET && type == SOCK_PACKET)) {
3156         protocol = tswap16(protocol);
3157     }
3158 
3159     ret = get_errno(socket(domain, type, protocol));
3160     if (ret >= 0) {
3161         ret = sock_flags_fixup(ret, target_type);
3162         if (type == SOCK_PACKET) {
3163             /* Handle an obsolete case:
3164              * if the socket type is SOCK_PACKET, it is bound by name.
3165              */
3166             fd_trans_register(ret, &target_packet_trans);
3167         } else if (domain == PF_NETLINK) {
3168             switch (protocol) {
3169 #ifdef CONFIG_RTNETLINK
3170             case NETLINK_ROUTE:
3171                 fd_trans_register(ret, &target_netlink_route_trans);
3172                 break;
3173 #endif
3174             case NETLINK_KOBJECT_UEVENT:
3175                 /* nothing to do: messages are strings */
3176                 break;
3177             case NETLINK_AUDIT:
3178                 fd_trans_register(ret, &target_netlink_audit_trans);
3179                 break;
3180             default:
3181                 g_assert_not_reached();
3182             }
3183         }
3184     }
3185     return ret;
3186 }
3187 
3188 /* do_bind() Must return target values and target errnos. */
3189 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3190                         socklen_t addrlen)
3191 {
3192     void *addr;
3193     abi_long ret;
3194 
3195     if ((int)addrlen < 0) {
3196         return -TARGET_EINVAL;
3197     }
3198 
3199     addr = alloca(addrlen+1);
3200 
3201     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3202     if (ret)
3203         return ret;
3204 
3205     return get_errno(bind(sockfd, addr, addrlen));
3206 }
3207 
3208 /* do_connect() Must return target values and target errnos. */
3209 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3210                            socklen_t addrlen)
3211 {
3212     void *addr;
3213     abi_long ret;
3214 
3215     if ((int)addrlen < 0) {
3216         return -TARGET_EINVAL;
3217     }
3218 
3219     addr = alloca(addrlen+1);
3220 
3221     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3222     if (ret)
3223         return ret;
3224 
3225     return get_errno(safe_connect(sockfd, addr, addrlen));
3226 }
3227 
3228 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3229 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3230                                       int flags, int send)
3231 {
3232     abi_long ret, len;
3233     struct msghdr msg;
3234     abi_ulong count;
3235     struct iovec *vec;
3236     abi_ulong target_vec;
3237 
3238     if (msgp->msg_name) {
3239         msg.msg_namelen = tswap32(msgp->msg_namelen);
3240         msg.msg_name = alloca(msg.msg_namelen+1);
3241         ret = target_to_host_sockaddr(fd, msg.msg_name,
3242                                       tswapal(msgp->msg_name),
3243                                       msg.msg_namelen);
3244         if (ret == -TARGET_EFAULT) {
3245             /* For connected sockets msg_name and msg_namelen must
3246              * be ignored, so returning EFAULT immediately is wrong.
3247              * Instead, pass a bad msg_name to the host kernel, and
3248              * let it decide whether to return EFAULT or not.
3249              */
3250             msg.msg_name = (void *)-1;
3251         } else if (ret) {
3252             goto out2;
3253         }
3254     } else {
3255         msg.msg_name = NULL;
3256         msg.msg_namelen = 0;
3257     }
3258     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3259     msg.msg_control = alloca(msg.msg_controllen);
3260     memset(msg.msg_control, 0, msg.msg_controllen);
3261 
3262     msg.msg_flags = tswap32(msgp->msg_flags);
3263 
3264     count = tswapal(msgp->msg_iovlen);
3265     target_vec = tswapal(msgp->msg_iov);
3266 
3267     if (count > IOV_MAX) {
3268         /* sendmsg/recvmsg return a different errno for this condition than
3269          * readv/writev do, so we must catch it here before lock_iovec() does.
3270          */
3271         ret = -TARGET_EMSGSIZE;
3272         goto out2;
3273     }
3274 
3275     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3276                      target_vec, count, send);
3277     if (vec == NULL) {
3278         ret = -host_to_target_errno(errno);
3279         goto out2;
3280     }
3281     msg.msg_iovlen = count;
3282     msg.msg_iov = vec;
3283 
3284     if (send) {
3285         if (fd_trans_target_to_host_data(fd)) {
3286             void *host_msg;
3287 
3288             host_msg = g_malloc(msg.msg_iov->iov_len);
3289             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3290             ret = fd_trans_target_to_host_data(fd)(host_msg,
3291                                                    msg.msg_iov->iov_len);
3292             if (ret >= 0) {
3293                 msg.msg_iov->iov_base = host_msg;
3294                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3295             }
3296             g_free(host_msg);
3297         } else {
3298             ret = target_to_host_cmsg(&msg, msgp);
3299             if (ret == 0) {
3300                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3301             }
3302         }
3303     } else {
3304         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3305         if (!is_error(ret)) {
3306             len = ret;
3307             if (fd_trans_host_to_target_data(fd)) {
3308                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3309                                                MIN(msg.msg_iov->iov_len, len));
3310             } else {
3311                 ret = host_to_target_cmsg(msgp, &msg);
3312             }
3313             if (!is_error(ret)) {
3314                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3315                 msgp->msg_flags = tswap32(msg.msg_flags);
3316                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3317                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3318                                     msg.msg_name, msg.msg_namelen);
3319                     if (ret) {
3320                         goto out;
3321                     }
3322                 }
3323 
3324                 ret = len;
3325             }
3326         }
3327     }
3328 
3329 out:
3330     unlock_iovec(vec, target_vec, count, !send);
3331 out2:
3332     return ret;
3333 }
3334 
3335 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3336                                int flags, int send)
3337 {
3338     abi_long ret;
3339     struct target_msghdr *msgp;
3340 
3341     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3342                           msgp,
3343                           target_msg,
3344                           send ? 1 : 0)) {
3345         return -TARGET_EFAULT;
3346     }
3347     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3348     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3349     return ret;
3350 }
3351 
3352 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3353  * so it might not have this *mmsg-specific flag either.
3354  */
3355 #ifndef MSG_WAITFORONE
3356 #define MSG_WAITFORONE 0x10000
3357 #endif
3358 
3359 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3360                                 unsigned int vlen, unsigned int flags,
3361                                 int send)
3362 {
3363     struct target_mmsghdr *mmsgp;
3364     abi_long ret = 0;
3365     int i;
3366 
3367     if (vlen > UIO_MAXIOV) {
3368         vlen = UIO_MAXIOV;
3369     }
3370 
3371     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3372     if (!mmsgp) {
3373         return -TARGET_EFAULT;
3374     }
3375 
3376     for (i = 0; i < vlen; i++) {
3377         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3378         if (is_error(ret)) {
3379             break;
3380         }
3381         mmsgp[i].msg_len = tswap32(ret);
3382         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3383         if (flags & MSG_WAITFORONE) {
3384             flags |= MSG_DONTWAIT;
3385         }
3386     }
3387 
3388     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3389 
3390     /* Return number of datagrams sent if we sent any at all;
3391      * otherwise return the error.
3392      */
3393     if (i) {
3394         return i;
3395     }
3396     return ret;
3397 }
3398 
3399 /* do_accept4() Must return target values and target errnos. */
3400 static abi_long do_accept4(int fd, abi_ulong target_addr,
3401                            abi_ulong target_addrlen_addr, int flags)
3402 {
3403     socklen_t addrlen, ret_addrlen;
3404     void *addr;
3405     abi_long ret;
3406     int host_flags;
3407 
3408     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
3409 
3410     if (target_addr == 0) {
3411         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3412     }
3413 
3414     /* linux returns EFAULT if addrlen pointer is invalid */
3415     if (get_user_u32(addrlen, target_addrlen_addr))
3416         return -TARGET_EFAULT;
3417 
3418     if ((int)addrlen < 0) {
3419         return -TARGET_EINVAL;
3420     }
3421 
3422     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3423         return -TARGET_EFAULT;
3424     }
3425 
3426     addr = alloca(addrlen);
3427 
3428     ret_addrlen = addrlen;
3429     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3430     if (!is_error(ret)) {
3431         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3432         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3433             ret = -TARGET_EFAULT;
3434         }
3435     }
3436     return ret;
3437 }
3438 
3439 /* do_getpeername() Must return target values and target errnos. */
3440 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3441                                abi_ulong target_addrlen_addr)
3442 {
3443     socklen_t addrlen, ret_addrlen;
3444     void *addr;
3445     abi_long ret;
3446 
3447     if (get_user_u32(addrlen, target_addrlen_addr))
3448         return -TARGET_EFAULT;
3449 
3450     if ((int)addrlen < 0) {
3451         return -TARGET_EINVAL;
3452     }
3453 
3454     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3455         return -TARGET_EFAULT;
3456     }
3457 
3458     addr = alloca(addrlen);
3459 
3460     ret_addrlen = addrlen;
3461     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3462     if (!is_error(ret)) {
3463         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3464         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3465             ret = -TARGET_EFAULT;
3466         }
3467     }
3468     return ret;
3469 }
3470 
3471 /* do_getsockname() Must return target values and target errnos. */
3472 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3473                                abi_ulong target_addrlen_addr)
3474 {
3475     socklen_t addrlen, ret_addrlen;
3476     void *addr;
3477     abi_long ret;
3478 
3479     if (get_user_u32(addrlen, target_addrlen_addr))
3480         return -TARGET_EFAULT;
3481 
3482     if ((int)addrlen < 0) {
3483         return -TARGET_EINVAL;
3484     }
3485 
3486     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3487         return -TARGET_EFAULT;
3488     }
3489 
3490     addr = alloca(addrlen);
3491 
3492     ret_addrlen = addrlen;
3493     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3494     if (!is_error(ret)) {
3495         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3496         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3497             ret = -TARGET_EFAULT;
3498         }
3499     }
3500     return ret;
3501 }
3502 
3503 /* do_socketpair() Must return target values and target errnos. */
3504 static abi_long do_socketpair(int domain, int type, int protocol,
3505                               abi_ulong target_tab_addr)
3506 {
3507     int tab[2];
3508     abi_long ret;
3509 
3510     target_to_host_sock_type(&type);
3511 
3512     ret = get_errno(socketpair(domain, type, protocol, tab));
3513     if (!is_error(ret)) {
3514         if (put_user_s32(tab[0], target_tab_addr)
3515             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3516             ret = -TARGET_EFAULT;
3517     }
3518     return ret;
3519 }
3520 
3521 /* do_sendto() Must return target values and target errnos. */
3522 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3523                           abi_ulong target_addr, socklen_t addrlen)
3524 {
3525     void *addr;
3526     void *host_msg;
3527     void *copy_msg = NULL;
3528     abi_long ret;
3529 
3530     if ((int)addrlen < 0) {
3531         return -TARGET_EINVAL;
3532     }
3533 
3534     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3535     if (!host_msg)
3536         return -TARGET_EFAULT;
3537     if (fd_trans_target_to_host_data(fd)) {
3538         copy_msg = host_msg;
3539         host_msg = g_malloc(len);
3540         memcpy(host_msg, copy_msg, len);
3541         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3542         if (ret < 0) {
3543             goto fail;
3544         }
3545     }
3546     if (target_addr) {
3547         addr = alloca(addrlen+1);
3548         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3549         if (ret) {
3550             goto fail;
3551         }
3552         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3553     } else {
3554         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3555     }
3556 fail:
3557     if (copy_msg) {
3558         g_free(host_msg);
3559         host_msg = copy_msg;
3560     }
3561     unlock_user(host_msg, msg, 0);
3562     return ret;
3563 }
3564 
3565 /* do_recvfrom() Must return target values and target errnos. */
3566 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3567                             abi_ulong target_addr,
3568                             abi_ulong target_addrlen)
3569 {
3570     socklen_t addrlen, ret_addrlen;
3571     void *addr;
3572     void *host_msg;
3573     abi_long ret;
3574 
3575     if (!msg) {
3576         host_msg = NULL;
3577     } else {
3578         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3579         if (!host_msg) {
3580             return -TARGET_EFAULT;
3581         }
3582     }
3583     if (target_addr) {
3584         if (get_user_u32(addrlen, target_addrlen)) {
3585             ret = -TARGET_EFAULT;
3586             goto fail;
3587         }
3588         if ((int)addrlen < 0) {
3589             ret = -TARGET_EINVAL;
3590             goto fail;
3591         }
3592         addr = alloca(addrlen);
3593         ret_addrlen = addrlen;
3594         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3595                                       addr, &ret_addrlen));
3596     } else {
3597         addr = NULL; /* To keep compiler quiet.  */
3598         addrlen = 0; /* To keep compiler quiet.  */
3599         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3600     }
3601     if (!is_error(ret)) {
3602         if (fd_trans_host_to_target_data(fd)) {
3603             abi_long trans;
3604             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3605             if (is_error(trans)) {
3606                 ret = trans;
3607                 goto fail;
3608             }
3609         }
3610         if (target_addr) {
3611             host_to_target_sockaddr(target_addr, addr,
3612                                     MIN(addrlen, ret_addrlen));
3613             if (put_user_u32(ret_addrlen, target_addrlen)) {
3614                 ret = -TARGET_EFAULT;
3615                 goto fail;
3616             }
3617         }
3618         unlock_user(host_msg, msg, len);
3619     } else {
3620 fail:
3621         unlock_user(host_msg, msg, 0);
3622     }
3623     return ret;
3624 }
3625 
3626 #ifdef TARGET_NR_socketcall
3627 /* do_socketcall() must return target values and target errnos. */
3628 static abi_long do_socketcall(int num, abi_ulong vptr)
3629 {
3630     static const unsigned nargs[] = { /* number of arguments per operation */
3631         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3632         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3633         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3634         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3635         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3636         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3637         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3638         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3639         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3640         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3641         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3642         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3643         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3644         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3645         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3646         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3647         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3648         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3649         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3650         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3651     };
3652     abi_long a[6]; /* max 6 args */
3653     unsigned i;
3654 
3655     /* check the range of the first argument num */
3656     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3657     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3658         return -TARGET_EINVAL;
3659     }
3660     /* ensure we have space for args */
3661     if (nargs[num] > ARRAY_SIZE(a)) {
3662         return -TARGET_EINVAL;
3663     }
3664     /* collect the arguments in a[] according to nargs[] */
3665     for (i = 0; i < nargs[num]; ++i) {
3666         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3667             return -TARGET_EFAULT;
3668         }
3669     }
3670     /* now that we have the args, invoke the appropriate underlying function */
3671     switch (num) {
3672     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3673         return do_socket(a[0], a[1], a[2]);
3674     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3675         return do_bind(a[0], a[1], a[2]);
3676     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3677         return do_connect(a[0], a[1], a[2]);
3678     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3679         return get_errno(listen(a[0], a[1]));
3680     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3681         return do_accept4(a[0], a[1], a[2], 0);
3682     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3683         return do_getsockname(a[0], a[1], a[2]);
3684     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3685         return do_getpeername(a[0], a[1], a[2]);
3686     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3687         return do_socketpair(a[0], a[1], a[2], a[3]);
3688     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3689         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3690     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3691         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3692     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3693         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3694     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3695         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3696     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3697         return get_errno(shutdown(a[0], a[1]));
3698     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3699         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3700     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3701         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3702     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3703         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3704     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3705         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3706     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3707         return do_accept4(a[0], a[1], a[2], a[3]);
3708     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3709         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3710     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3711         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3712     default:
3713         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3714         return -TARGET_EINVAL;
3715     }
3716 }
3717 #endif
3718 
3719 #define N_SHM_REGIONS	32
3720 
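/*
 * Record the guest address and size of each segment attached by do_shmat()
 * so that do_shmdt() can clear the corresponding page flags when the
 * segment is detached.
 */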
3721 static struct shm_region {
3722     abi_ulong start;
3723     abi_ulong size;
3724     bool in_use;
3725 } shm_regions[N_SHM_REGIONS];
3726 
3727 #ifndef TARGET_SEMID64_DS
3728 /* asm-generic version of this struct */
3729 struct target_semid64_ds
3730 {
3731   struct target_ipc_perm sem_perm;
3732   abi_ulong sem_otime;
3733 #if TARGET_ABI_BITS == 32
3734   abi_ulong __unused1;
3735 #endif
3736   abi_ulong sem_ctime;
3737 #if TARGET_ABI_BITS == 32
3738   abi_ulong __unused2;
3739 #endif
3740   abi_ulong sem_nsems;
3741   abi_ulong __unused3;
3742   abi_ulong __unused4;
3743 };
3744 #endif
3745 
3746 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3747                                                abi_ulong target_addr)
3748 {
3749     struct target_ipc_perm *target_ip;
3750     struct target_semid64_ds *target_sd;
3751 
3752     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3753         return -TARGET_EFAULT;
3754     target_ip = &(target_sd->sem_perm);
3755     host_ip->__key = tswap32(target_ip->__key);
3756     host_ip->uid = tswap32(target_ip->uid);
3757     host_ip->gid = tswap32(target_ip->gid);
3758     host_ip->cuid = tswap32(target_ip->cuid);
3759     host_ip->cgid = tswap32(target_ip->cgid);
3760 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3761     host_ip->mode = tswap32(target_ip->mode);
3762 #else
3763     host_ip->mode = tswap16(target_ip->mode);
3764 #endif
3765 #if defined(TARGET_PPC)
3766     host_ip->__seq = tswap32(target_ip->__seq);
3767 #else
3768     host_ip->__seq = tswap16(target_ip->__seq);
3769 #endif
3770     unlock_user_struct(target_sd, target_addr, 0);
3771     return 0;
3772 }
3773 
3774 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3775                                                struct ipc_perm *host_ip)
3776 {
3777     struct target_ipc_perm *target_ip;
3778     struct target_semid64_ds *target_sd;
3779 
3780     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3781         return -TARGET_EFAULT;
3782     target_ip = &(target_sd->sem_perm);
3783     target_ip->__key = tswap32(host_ip->__key);
3784     target_ip->uid = tswap32(host_ip->uid);
3785     target_ip->gid = tswap32(host_ip->gid);
3786     target_ip->cuid = tswap32(host_ip->cuid);
3787     target_ip->cgid = tswap32(host_ip->cgid);
3788 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3789     target_ip->mode = tswap32(host_ip->mode);
3790 #else
3791     target_ip->mode = tswap16(host_ip->mode);
3792 #endif
3793 #if defined(TARGET_PPC)
3794     target_ip->__seq = tswap32(host_ip->__seq);
3795 #else
3796     target_ip->__seq = tswap16(host_ip->__seq);
3797 #endif
3798     unlock_user_struct(target_sd, target_addr, 1);
3799     return 0;
3800 }
3801 
3802 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3803                                                abi_ulong target_addr)
3804 {
3805     struct target_semid64_ds *target_sd;
3806 
3807     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3808         return -TARGET_EFAULT;
3809     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3810         return -TARGET_EFAULT;
3811     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3812     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3813     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3814     unlock_user_struct(target_sd, target_addr, 0);
3815     return 0;
3816 }
3817 
3818 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3819                                                struct semid_ds *host_sd)
3820 {
3821     struct target_semid64_ds *target_sd;
3822 
3823     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3824         return -TARGET_EFAULT;
3825     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3826         return -TARGET_EFAULT;
3827     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3828     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3829     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3830     unlock_user_struct(target_sd, target_addr, 1);
3831     return 0;
3832 }
3833 
3834 struct target_seminfo {
3835     int semmap;
3836     int semmni;
3837     int semmns;
3838     int semmnu;
3839     int semmsl;
3840     int semopm;
3841     int semume;
3842     int semusz;
3843     int semvmx;
3844     int semaem;
3845 };
3846 
3847 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3848                                               struct seminfo *host_seminfo)
3849 {
3850     struct target_seminfo *target_seminfo;
3851     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3852         return -TARGET_EFAULT;
3853     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3854     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3855     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3856     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3857     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3858     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3859     __put_user(host_seminfo->semume, &target_seminfo->semume);
3860     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3861     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3862     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3863     unlock_user_struct(target_seminfo, target_addr, 1);
3864     return 0;
3865 }
3866 
3867 union semun {
3868     int val;
3869     struct semid_ds *buf;
3870     unsigned short *array;
3871     struct seminfo *__buf;
3872 };
3873 
3874 union target_semun {
3875     int val;
3876     abi_ulong buf;
3877     abi_ulong array;
3878     abi_ulong __buf;
3879 };
3880 
3881 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3882                                                abi_ulong target_addr)
3883 {
3884     int nsems;
3885     unsigned short *array;
3886     union semun semun;
3887     struct semid_ds semid_ds;
3888     int i, ret;
3889 
3890     semun.buf = &semid_ds;
3891 
3892     ret = semctl(semid, 0, IPC_STAT, semun);
3893     if (ret == -1)
3894         return get_errno(ret);
3895 
3896     nsems = semid_ds.sem_nsems;
3897 
3898     *host_array = g_try_new(unsigned short, nsems);
3899     if (!*host_array) {
3900         return -TARGET_ENOMEM;
3901     }
3902     array = lock_user(VERIFY_READ, target_addr,
3903                       nsems*sizeof(unsigned short), 1);
3904     if (!array) {
3905         g_free(*host_array);
3906         return -TARGET_EFAULT;
3907     }
3908 
3909     for (i = 0; i < nsems; i++) {
3910         __get_user((*host_array)[i], &array[i]);
3911     }
3912     unlock_user(array, target_addr, 0);
3913 
3914     return 0;
3915 }
3916 
3917 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3918                                                unsigned short **host_array)
3919 {
3920     int nsems;
3921     unsigned short *array;
3922     union semun semun;
3923     struct semid_ds semid_ds;
3924     int i, ret;
3925 
3926     semun.buf = &semid_ds;
3927 
3928     ret = semctl(semid, 0, IPC_STAT, semun);
3929     if (ret == -1)
3930         return get_errno(ret);
3931 
3932     nsems = semid_ds.sem_nsems;
3933 
3934     array = lock_user(VERIFY_WRITE, target_addr,
3935                       nsems*sizeof(unsigned short), 0);
3936     if (!array)
3937         return -TARGET_EFAULT;
3938 
3939     for (i = 0; i < nsems; i++) {
3940         __put_user((*host_array)[i], &array[i]);
3941     }
3942     g_free(*host_array);
3943     unlock_user(array, target_addr, 1);
3944 
3945     return 0;
3946 }
3947 
3948 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3949                                  abi_ulong target_arg)
3950 {
3951     union target_semun target_su = { .buf = target_arg };
3952     union semun arg;
3953     struct semid_ds dsarg;
3954     unsigned short *array = NULL;
3955     struct seminfo seminfo;
3956     abi_long ret = -TARGET_EINVAL;
3957     abi_long err;
3958     cmd &= 0xff;
3959 
3960     switch (cmd) {
3961     case GETVAL:
3962     case SETVAL:
3963         /* In 64 bit cross-endian situations, we will erroneously pick up
3964          * the wrong half of the union for the "val" element.  To rectify
3965          * this, the entire 8-byte structure is byteswapped, followed by
3966          * a swap of the 4 byte val field. In other cases, the data is
3967          * already in proper host byte order. */
3968         if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3969             target_su.buf = tswapal(target_su.buf);
3970             arg.val = tswap32(target_su.val);
3971         } else {
3972             arg.val = target_su.val;
3973         }
3974         ret = get_errno(semctl(semid, semnum, cmd, arg));
3975         break;
3976     case GETALL:
3977     case SETALL:
3978         err = target_to_host_semarray(semid, &array, target_su.array);
3979         if (err)
3980             return err;
3981         arg.array = array;
3982         ret = get_errno(semctl(semid, semnum, cmd, arg));
3983         err = host_to_target_semarray(semid, target_su.array, &array);
3984         if (err)
3985             return err;
3986         break;
3987     case IPC_STAT:
3988     case IPC_SET:
3989     case SEM_STAT:
3990         err = target_to_host_semid_ds(&dsarg, target_su.buf);
3991         if (err)
3992             return err;
3993         arg.buf = &dsarg;
3994         ret = get_errno(semctl(semid, semnum, cmd, arg));
3995         err = host_to_target_semid_ds(target_su.buf, &dsarg);
3996         if (err)
3997             return err;
3998         break;
3999     case IPC_INFO:
4000     case SEM_INFO:
4001         arg.__buf = &seminfo;
4002         ret = get_errno(semctl(semid, semnum, cmd, arg));
4003         err = host_to_target_seminfo(target_su.__buf, &seminfo);
4004         if (err)
4005             return err;
4006         break;
4007     case IPC_RMID:
4008     case GETPID:
4009     case GETNCNT:
4010     case GETZCNT:
4011         ret = get_errno(semctl(semid, semnum, cmd, NULL));
4012         break;
4013     }
4014 
4015     return ret;
4016 }
4017 
4018 struct target_sembuf {
4019     unsigned short sem_num;
4020     short sem_op;
4021     short sem_flg;
4022 };
4023 
4024 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4025                                              abi_ulong target_addr,
4026                                              unsigned nsops)
4027 {
4028     struct target_sembuf *target_sembuf;
4029     int i;
4030 
4031     target_sembuf = lock_user(VERIFY_READ, target_addr,
4032                               nsops*sizeof(struct target_sembuf), 1);
4033     if (!target_sembuf)
4034         return -TARGET_EFAULT;
4035 
4036     for (i = 0; i < nsops; i++) {
4037         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4038         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4039         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4040     }
4041 
4042     unlock_user(target_sembuf, target_addr, 0);
4043 
4044     return 0;
4045 }
4046 
4047 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4048     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4049 
4050 /*
4051  * This macro is required to handle the s390 variant, which passes the
4052  * arguments in a different order from the default.
4053  */
4054 #ifdef __s390x__
4055 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4056   (__nsops), (__timeout), (__sops)
4057 #else
4058 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4059   (__nsops), 0, (__sops), (__timeout)
4060 #endif
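/*
 * E.g. SEMTIMEDOP_IPC_ARGS(n, ops, ts) expands to "(n), (ts), (ops)" on
 * s390x and to "(n), 0, (ops), (ts)" elsewhere, matching the argument
 * order expected by the respective sys_ipc implementations.
 */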
4061 
4062 static inline abi_long do_semtimedop(int semid,
4063                                      abi_long ptr,
4064                                      unsigned nsops,
4065                                      abi_long timeout, bool time64)
4066 {
4067     struct sembuf *sops;
4068     struct timespec ts, *pts = NULL;
4069     abi_long ret;
4070 
4071     if (timeout) {
4072         pts = &ts;
4073         if (time64) {
4074             if (target_to_host_timespec64(pts, timeout)) {
4075                 return -TARGET_EFAULT;
4076             }
4077         } else {
4078             if (target_to_host_timespec(pts, timeout)) {
4079                 return -TARGET_EFAULT;
4080             }
4081         }
4082     }
4083 
4084     if (nsops > TARGET_SEMOPM) {
4085         return -TARGET_E2BIG;
4086     }
4087 
4088     sops = g_new(struct sembuf, nsops);
4089 
4090     if (target_to_host_sembuf(sops, ptr, nsops)) {
4091         g_free(sops);
4092         return -TARGET_EFAULT;
4093     }
4094 
4095     ret = -TARGET_ENOSYS;
4096 #ifdef __NR_semtimedop
4097     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4098 #endif
4099 #ifdef __NR_ipc
4100     if (ret == -TARGET_ENOSYS) {
4101         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4102                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4103     }
4104 #endif
4105     g_free(sops);
4106     return ret;
4107 }
4108 #endif
4109 
4110 struct target_msqid_ds
4111 {
4112     struct target_ipc_perm msg_perm;
4113     abi_ulong msg_stime;
4114 #if TARGET_ABI_BITS == 32
4115     abi_ulong __unused1;
4116 #endif
4117     abi_ulong msg_rtime;
4118 #if TARGET_ABI_BITS == 32
4119     abi_ulong __unused2;
4120 #endif
4121     abi_ulong msg_ctime;
4122 #if TARGET_ABI_BITS == 32
4123     abi_ulong __unused3;
4124 #endif
4125     abi_ulong __msg_cbytes;
4126     abi_ulong msg_qnum;
4127     abi_ulong msg_qbytes;
4128     abi_ulong msg_lspid;
4129     abi_ulong msg_lrpid;
4130     abi_ulong __unused4;
4131     abi_ulong __unused5;
4132 };
4133 
4134 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4135                                                abi_ulong target_addr)
4136 {
4137     struct target_msqid_ds *target_md;
4138 
4139     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4140         return -TARGET_EFAULT;
4141     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4142         return -TARGET_EFAULT;
4143     host_md->msg_stime = tswapal(target_md->msg_stime);
4144     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4145     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4146     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4147     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4148     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4149     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4150     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4151     unlock_user_struct(target_md, target_addr, 0);
4152     return 0;
4153 }
4154 
4155 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4156                                                struct msqid_ds *host_md)
4157 {
4158     struct target_msqid_ds *target_md;
4159 
4160     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4161         return -TARGET_EFAULT;
4162     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4163         return -TARGET_EFAULT;
4164     target_md->msg_stime = tswapal(host_md->msg_stime);
4165     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4166     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4167     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4168     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4169     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4170     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4171     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4172     unlock_user_struct(target_md, target_addr, 1);
4173     return 0;
4174 }
4175 
4176 struct target_msginfo {
4177     int msgpool;
4178     int msgmap;
4179     int msgmax;
4180     int msgmnb;
4181     int msgmni;
4182     int msgssz;
4183     int msgtql;
4184     unsigned short int msgseg;
4185 };
4186 
4187 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4188                                               struct msginfo *host_msginfo)
4189 {
4190     struct target_msginfo *target_msginfo;
4191     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4192         return -TARGET_EFAULT;
4193     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4194     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4195     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4196     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4197     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4198     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4199     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4200     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4201     unlock_user_struct(target_msginfo, target_addr, 1);
4202     return 0;
4203 }
4204 
4205 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4206 {
4207     struct msqid_ds dsarg;
4208     struct msginfo msginfo;
4209     abi_long ret = -TARGET_EINVAL;
4210 
4211     cmd &= 0xff;
4212 
4213     switch (cmd) {
4214     case IPC_STAT:
4215     case IPC_SET:
4216     case MSG_STAT:
4217         if (target_to_host_msqid_ds(&dsarg,ptr))
4218             return -TARGET_EFAULT;
4219         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4220         if (host_to_target_msqid_ds(ptr,&dsarg))
4221             return -TARGET_EFAULT;
4222         break;
4223     case IPC_RMID:
4224         ret = get_errno(msgctl(msgid, cmd, NULL));
4225         break;
4226     case IPC_INFO:
4227     case MSG_INFO:
4228         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4229         if (host_to_target_msginfo(ptr, &msginfo))
4230             return -TARGET_EFAULT;
4231         break;
4232     }
4233 
4234     return ret;
4235 }
4236 
4237 struct target_msgbuf {
4238     abi_long mtype;
4239     char mtext[1];
4240 };
4241 
4242 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4243                                  ssize_t msgsz, int msgflg)
4244 {
4245     struct target_msgbuf *target_mb;
4246     struct msgbuf *host_mb;
4247     abi_long ret = 0;
4248 
4249     if (msgsz < 0) {
4250         return -TARGET_EINVAL;
4251     }
4252 
4253     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4254         return -TARGET_EFAULT;
4255     host_mb = g_try_malloc(msgsz + sizeof(long));
4256     if (!host_mb) {
4257         unlock_user_struct(target_mb, msgp, 0);
4258         return -TARGET_ENOMEM;
4259     }
4260     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4261     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4262     ret = -TARGET_ENOSYS;
4263 #ifdef __NR_msgsnd
4264     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4265 #endif
4266 #ifdef __NR_ipc
4267     if (ret == -TARGET_ENOSYS) {
4268 #ifdef __s390x__
4269         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4270                                  host_mb));
4271 #else
4272         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4273                                  host_mb, 0));
4274 #endif
4275     }
4276 #endif
4277     g_free(host_mb);
4278     unlock_user_struct(target_mb, msgp, 0);
4279 
4280     return ret;
4281 }
4282 
4283 #ifdef __NR_ipc
4284 #if defined(__sparc__)
4285 /* On SPARC, msgrcv does not use the kludge on the final 2 arguments.  */
4286 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4287 #elif defined(__s390x__)
4288 /* The s390 sys_ipc variant has only five parameters.  */
4289 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4290     ((long int[]){(long int)__msgp, __msgtyp})
4291 #else
4292 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4293     ((long int[]){(long int)__msgp, __msgtyp}), 0
4294 #endif
4295 #endif
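/*
 * In the non-SPARC cases MSGRCV_ARGS() packs msgp and msgtyp into a
 * temporary array, and it is the address of that array which sys_ipc
 * receives -- this is the "kludge" referred to above.
 */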
4296 
4297 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4298                                  ssize_t msgsz, abi_long msgtyp,
4299                                  int msgflg)
4300 {
4301     struct target_msgbuf *target_mb;
4302     char *target_mtext;
4303     struct msgbuf *host_mb;
4304     abi_long ret = 0;
4305 
4306     if (msgsz < 0) {
4307         return -TARGET_EINVAL;
4308     }
4309 
4310     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4311         return -TARGET_EFAULT;
4312 
4313     host_mb = g_try_malloc(msgsz + sizeof(long));
4314     if (!host_mb) {
4315         ret = -TARGET_ENOMEM;
4316         goto end;
4317     }
4318     ret = -TARGET_ENOSYS;
4319 #ifdef __NR_msgrcv
4320     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4321 #endif
4322 #ifdef __NR_ipc
4323     if (ret == -TARGET_ENOSYS) {
4324         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4325                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4326     }
4327 #endif
4328 
4329     if (ret > 0) {
4330         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4331         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4332         if (!target_mtext) {
4333             ret = -TARGET_EFAULT;
4334             goto end;
4335         }
4336         memcpy(target_mb->mtext, host_mb->mtext, ret);
4337         unlock_user(target_mtext, target_mtext_addr, ret);
4338     }
4339 
4340     target_mb->mtype = tswapal(host_mb->mtype);
4341 
4342 end:
4343     if (target_mb)
4344         unlock_user_struct(target_mb, msgp, 1);
4345     g_free(host_mb);
4346     return ret;
4347 }
4348 
4349 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4350                                                abi_ulong target_addr)
4351 {
4352     struct target_shmid_ds *target_sd;
4353 
4354     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4355         return -TARGET_EFAULT;
4356     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4357         return -TARGET_EFAULT;
4358     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4359     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4360     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4361     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4362     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4363     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4364     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4365     unlock_user_struct(target_sd, target_addr, 0);
4366     return 0;
4367 }
4368 
4369 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4370                                                struct shmid_ds *host_sd)
4371 {
4372     struct target_shmid_ds *target_sd;
4373 
4374     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4375         return -TARGET_EFAULT;
4376     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4377         return -TARGET_EFAULT;
4378     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4379     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4380     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4381     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4382     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4383     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4384     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4385     unlock_user_struct(target_sd, target_addr, 1);
4386     return 0;
4387 }
4388 
4389 struct  target_shminfo {
4390     abi_ulong shmmax;
4391     abi_ulong shmmin;
4392     abi_ulong shmmni;
4393     abi_ulong shmseg;
4394     abi_ulong shmall;
4395 };
4396 
4397 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4398                                               struct shminfo *host_shminfo)
4399 {
4400     struct target_shminfo *target_shminfo;
4401     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4402         return -TARGET_EFAULT;
4403     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4404     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4405     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4406     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4407     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4408     unlock_user_struct(target_shminfo, target_addr, 1);
4409     return 0;
4410 }
4411 
4412 struct target_shm_info {
4413     int used_ids;
4414     abi_ulong shm_tot;
4415     abi_ulong shm_rss;
4416     abi_ulong shm_swp;
4417     abi_ulong swap_attempts;
4418     abi_ulong swap_successes;
4419 };
4420 
4421 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4422                                                struct shm_info *host_shm_info)
4423 {
4424     struct target_shm_info *target_shm_info;
4425     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4426         return -TARGET_EFAULT;
4427     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4428     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4429     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4430     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4431     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4432     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4433     unlock_user_struct(target_shm_info, target_addr, 1);
4434     return 0;
4435 }
4436 
4437 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4438 {
4439     struct shmid_ds dsarg;
4440     struct shminfo shminfo;
4441     struct shm_info shm_info;
4442     abi_long ret = -TARGET_EINVAL;
4443 
4444     cmd &= 0xff;
4445 
4446     switch(cmd) {
4447     case IPC_STAT:
4448     case IPC_SET:
4449     case SHM_STAT:
4450         if (target_to_host_shmid_ds(&dsarg, buf))
4451             return -TARGET_EFAULT;
4452         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4453         if (host_to_target_shmid_ds(buf, &dsarg))
4454             return -TARGET_EFAULT;
4455         break;
4456     case IPC_INFO:
4457         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4458         if (host_to_target_shminfo(buf, &shminfo))
4459             return -TARGET_EFAULT;
4460         break;
4461     case SHM_INFO:
4462         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4463         if (host_to_target_shm_info(buf, &shm_info))
4464             return -TARGET_EFAULT;
4465         break;
4466     case IPC_RMID:
4467     case SHM_LOCK:
4468     case SHM_UNLOCK:
4469         ret = get_errno(shmctl(shmid, cmd, NULL));
4470         break;
4471     }
4472 
4473     return ret;
4474 }
4475 
4476 #ifndef TARGET_FORCE_SHMLBA
4477 /* For most architectures, SHMLBA is the same as the page size;
4478  * some architectures have larger values, in which case they should
4479  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4480  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4481  * and defining its own value for SHMLBA.
4482  *
4483  * The kernel also permits SHMLBA to be set by the architecture to a
4484  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4485  * this means that addresses are rounded to the large size if
4486  * SHM_RND is set but addresses not aligned to that size are not rejected
4487  * as long as they are at least page-aligned. Since the only architecture
4488  * which uses this is ia64 this code doesn't provide for that oddity.
4489  * which uses this is ia64, this code doesn't provide for that oddity.
4490 static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
4491 {
4492     return TARGET_PAGE_SIZE;
4493 }
4494 #endif
4495 
4496 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4497                                  int shmid, abi_ulong shmaddr, int shmflg)
4498 {
4499     CPUState *cpu = env_cpu(cpu_env);
4500     abi_long raddr;
4501     void *host_raddr;
4502     struct shmid_ds shm_info;
4503     int i,ret;
4504     abi_ulong shmlba;
4505 
4506     /* shmat pointers are always untagged */
4507 
4508     /* find out the length of the shared memory segment */
4509     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4510     if (is_error(ret)) {
4511         /* can't get length, bail out */
4512         return ret;
4513     }
4514 
4515     shmlba = target_shmlba(cpu_env);
4516 
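    /*
     * E.g. with shmlba == 0x1000, a request for shmaddr == 0x20010 is
     * rounded down to 0x20000 when SHM_RND is set and rejected with
     * EINVAL otherwise.
     */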
4517     if (shmaddr & (shmlba - 1)) {
4518         if (shmflg & SHM_RND) {
4519             shmaddr &= ~(shmlba - 1);
4520         } else {
4521             return -TARGET_EINVAL;
4522         }
4523     }
4524     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4525         return -TARGET_EINVAL;
4526     }
4527 
4528     mmap_lock();
4529 
4530     /*
4531      * We're mapping shared memory, so ensure we generate code for parallel
4532      * execution and flush old translations.  This will work up to the level
4533      * supported by the host -- anything that requires EXCP_ATOMIC will not
4534      * be atomic with respect to an external process.
4535      */
4536     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4537         cpu->tcg_cflags |= CF_PARALLEL;
4538         tb_flush(cpu);
4539     }
4540 
4541     if (shmaddr)
4542         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4543     else {
4544         abi_ulong mmap_start;
4545 
4546         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4547         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4548 
4549         if (mmap_start == -1) {
4550             errno = ENOMEM;
4551             host_raddr = (void *)-1;
4552         } else
4553             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4554                                shmflg | SHM_REMAP);
4555     }
4556 
4557     if (host_raddr == (void *)-1) {
4558         mmap_unlock();
4559         return get_errno((long)host_raddr);
4560     }
4561     raddr = h2g((unsigned long)host_raddr);
4562 
4563     page_set_flags(raddr, raddr + shm_info.shm_segsz,
4564                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4565                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4566 
4567     for (i = 0; i < N_SHM_REGIONS; i++) {
4568         if (!shm_regions[i].in_use) {
4569             shm_regions[i].in_use = true;
4570             shm_regions[i].start = raddr;
4571             shm_regions[i].size = shm_info.shm_segsz;
4572             break;
4573         }
4574     }
4575 
4576     mmap_unlock();
4577     return raddr;
4578 
4579 }
4580 
4581 static inline abi_long do_shmdt(abi_ulong shmaddr)
4582 {
4583     int i;
4584     abi_long rv;
4585 
4586     /* shmdt pointers are always untagged */
4587 
4588     mmap_lock();
4589 
4590     for (i = 0; i < N_SHM_REGIONS; ++i) {
4591         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4592             shm_regions[i].in_use = false;
4593             page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
4594             break;
4595         }
4596     }
4597     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4598 
4599     mmap_unlock();
4600 
4601     return rv;
4602 }
4603 
4604 #ifdef TARGET_NR_ipc
4605 /* ??? This only works with linear mappings.  */
4606 /* do_ipc() must return target values and target errnos. */
4607 static abi_long do_ipc(CPUArchState *cpu_env,
4608                        unsigned int call, abi_long first,
4609                        abi_long second, abi_long third,
4610                        abi_long ptr, abi_long fifth)
4611 {
4612     int version;
4613     abi_long ret = 0;
4614 
4615     version = call >> 16;
4616     call &= 0xffff;
4617 
4618     switch (call) {
4619     case IPCOP_semop:
4620         ret = do_semtimedop(first, ptr, second, 0, false);
4621         break;
4622     case IPCOP_semtimedop:
4623     /*
4624      * The s390 sys_ipc variant has only five parameters instead of six
4625      * (as in the default variant); the only difference is the handling of
4626      * SEMTIMEDOP, where on s390 the third parameter is used as a pointer
4627      * to a struct timespec while the generic variant uses the fifth parameter.
4628      */
4629 #if defined(TARGET_S390X)
4630         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4631 #else
4632         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4633 #endif
4634         break;
4635 
4636     case IPCOP_semget:
4637         ret = get_errno(semget(first, second, third));
4638         break;
4639 
4640     case IPCOP_semctl: {
4641         /* The semun argument to semctl is passed by value, so dereference the
4642          * ptr argument. */
4643         abi_ulong atptr;
4644         get_user_ual(atptr, ptr);
4645         ret = do_semctl(first, second, third, atptr);
4646         break;
4647     }
4648 
4649     case IPCOP_msgget:
4650         ret = get_errno(msgget(first, second));
4651         break;
4652 
4653     case IPCOP_msgsnd:
4654         ret = do_msgsnd(first, ptr, second, third);
4655         break;
4656 
4657     case IPCOP_msgctl:
4658         ret = do_msgctl(first, second, ptr);
4659         break;
4660 
4661     case IPCOP_msgrcv:
4662         switch (version) {
4663         case 0:
4664             {
4665                 struct target_ipc_kludge {
4666                     abi_long msgp;
4667                     abi_long msgtyp;
4668                 } *tmp;
4669 
4670                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4671                     ret = -TARGET_EFAULT;
4672                     break;
4673                 }
4674 
4675                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4676 
4677                 unlock_user_struct(tmp, ptr, 0);
4678                 break;
4679             }
4680         default:
4681             ret = do_msgrcv(first, ptr, second, fifth, third);
4682         }
4683         break;
4684 
4685     case IPCOP_shmat:
4686         switch (version) {
4687         default:
4688         {
4689             abi_ulong raddr;
4690             raddr = do_shmat(cpu_env, first, ptr, second);
4691             if (is_error(raddr))
4692                 return get_errno(raddr);
4693             if (put_user_ual(raddr, third))
4694                 return -TARGET_EFAULT;
4695             break;
4696         }
4697         case 1:
4698             ret = -TARGET_EINVAL;
4699             break;
4700         }
4701         break;
4702     case IPCOP_shmdt:
4703         ret = do_shmdt(ptr);
4704         break;
4705 
4706     case IPCOP_shmget:
4707         /* IPC_* flag values are the same on all linux platforms */
4708         ret = get_errno(shmget(first, second, third));
4709         break;
4710 
4711         /* IPC_* and SHM_* command values are the same on all linux platforms */
4712     case IPCOP_shmctl:
4713         ret = do_shmctl(first, second, ptr);
4714         break;
4715     default:
4716         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4717                       call, version);
4718         ret = -TARGET_ENOSYS;
4719         break;
4720     }
4721     return ret;
4722 }
4723 #endif
4724 
4725 /* kernel structure types definitions */
4726 
4727 #define STRUCT(name, ...) STRUCT_ ## name,
4728 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4729 enum {
4730 #include "syscall_types.h"
4731 STRUCT_MAX
4732 };
4733 #undef STRUCT
4734 #undef STRUCT_SPECIAL
4735 
4736 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4737 #define STRUCT_SPECIAL(name)
4738 #include "syscall_types.h"
4739 #undef STRUCT
4740 #undef STRUCT_SPECIAL
4741 
4742 #define MAX_STRUCT_SIZE 4096
4743 
4744 #ifdef CONFIG_FIEMAP
4745 /* So fiemap access checks don't overflow on 32 bit systems.
4746  * This is very slightly smaller than the limit imposed by
4747  * the underlying kernel.
4748  */
4749 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4750                             / sizeof(struct fiemap_extent))
4751 
4752 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4753                                        int fd, int cmd, abi_long arg)
4754 {
4755     /* The parameter for this ioctl is a struct fiemap followed
4756      * by an array of struct fiemap_extent whose size is set
4757      * in fiemap->fm_extent_count. The array is filled in by the
4758      * ioctl.
4759      */
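    /*
     * Guests commonly call this twice: first with fm_extent_count == 0 so
     * the kernel only reports fm_mapped_extents, then again with a buffer
     * sized for that many extents; the copy-back below handles both cases.
     */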
4760     int target_size_in, target_size_out;
4761     struct fiemap *fm;
4762     const argtype *arg_type = ie->arg_type;
4763     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4764     void *argptr, *p;
4765     abi_long ret;
4766     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4767     uint32_t outbufsz;
4768     int free_fm = 0;
4769 
4770     assert(arg_type[0] == TYPE_PTR);
4771     assert(ie->access == IOC_RW);
4772     arg_type++;
4773     target_size_in = thunk_type_size(arg_type, 0);
4774     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4775     if (!argptr) {
4776         return -TARGET_EFAULT;
4777     }
4778     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4779     unlock_user(argptr, arg, 0);
4780     fm = (struct fiemap *)buf_temp;
4781     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4782         return -TARGET_EINVAL;
4783     }
4784 
4785     outbufsz = sizeof (*fm) +
4786         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4787 
4788     if (outbufsz > MAX_STRUCT_SIZE) {
4789         /* We can't fit all the extents into the fixed size buffer.
4790          * Allocate one that is large enough and use it instead.
4791          */
4792         fm = g_try_malloc(outbufsz);
4793         if (!fm) {
4794             return -TARGET_ENOMEM;
4795         }
4796         memcpy(fm, buf_temp, sizeof(struct fiemap));
4797         free_fm = 1;
4798     }
4799     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4800     if (!is_error(ret)) {
4801         target_size_out = target_size_in;
4802         /* An extent_count of 0 means we were only counting the extents
4803          * so there are no structs to copy
4804          */
4805         if (fm->fm_extent_count != 0) {
4806             target_size_out += fm->fm_mapped_extents * extent_size;
4807         }
4808         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4809         if (!argptr) {
4810             ret = -TARGET_EFAULT;
4811         } else {
4812             /* Convert the struct fiemap */
4813             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4814             if (fm->fm_extent_count != 0) {
4815                 p = argptr + target_size_in;
4816                 /* ...and then all the struct fiemap_extents */
4817                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4818                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4819                                   THUNK_TARGET);
4820                     p += extent_size;
4821                 }
4822             }
4823             unlock_user(argptr, arg, target_size_out);
4824         }
4825     }
4826     if (free_fm) {
4827         g_free(fm);
4828     }
4829     return ret;
4830 }
4831 #endif
4832 
4833 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4834                                 int fd, int cmd, abi_long arg)
4835 {
4836     const argtype *arg_type = ie->arg_type;
4837     int target_size;
4838     void *argptr;
4839     int ret;
4840     struct ifconf *host_ifconf;
4841     uint32_t outbufsz;
4842     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4843     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4844     int target_ifreq_size;
4845     int nb_ifreq;
4846     int free_buf = 0;
4847     int i;
4848     int target_ifc_len;
4849     abi_long target_ifc_buf;
4850     int host_ifc_len;
4851     char *host_ifc_buf;
4852 
4853     assert(arg_type[0] == TYPE_PTR);
4854     assert(ie->access == IOC_RW);
4855 
4856     arg_type++;
4857     target_size = thunk_type_size(arg_type, 0);
4858 
4859     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4860     if (!argptr)
4861         return -TARGET_EFAULT;
4862     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4863     unlock_user(argptr, arg, 0);
4864 
4865     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4866     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4867     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
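    /*
     * The guest's ifc_len counts target-sized ifreq records (sized via
     * ifreq_max_type, the ifmap variant), while the host buffer holds host
     * struct ifreq entries, so the length is converted in both directions
     * around the ioctl below.
     */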
4868 
4869     if (target_ifc_buf != 0) {
4870         target_ifc_len = host_ifconf->ifc_len;
4871         nb_ifreq = target_ifc_len / target_ifreq_size;
4872         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4873 
4874         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4875         if (outbufsz > MAX_STRUCT_SIZE) {
4876             /*
4877              * We can't fit all the ifreq entries into the fixed size buffer.
4878              * Allocate one that is large enough and use it instead.
4879              */
4880             host_ifconf = g_try_malloc(outbufsz);
4881             if (!host_ifconf) {
4882                 return -TARGET_ENOMEM;
4883             }
4884             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4885             free_buf = 1;
4886         }
4887         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4888 
4889         host_ifconf->ifc_len = host_ifc_len;
4890     } else {
4891         host_ifc_buf = NULL;
4892     }
4893     host_ifconf->ifc_buf = host_ifc_buf;
4894 
4895     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4896     if (!is_error(ret)) {
4897         /* convert host ifc_len to target ifc_len */
4898 
4899         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4900         target_ifc_len = nb_ifreq * target_ifreq_size;
4901         host_ifconf->ifc_len = target_ifc_len;
4902 
4903         /* restore target ifc_buf */
4904 
4905         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4906 
4907         /* copy struct ifconf to target user */
4908 
4909         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4910         if (!argptr)
4911             return -TARGET_EFAULT;
4912         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4913         unlock_user(argptr, arg, target_size);
4914 
4915         if (target_ifc_buf != 0) {
4916             /* copy ifreq[] to target user */
4917             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
            if (!argptr) {
                if (free_buf) {
                    g_free(host_ifconf);
                }
                return -TARGET_EFAULT;
            }
4918             for (i = 0; i < nb_ifreq ; i++) {
4919                 thunk_convert(argptr + i * target_ifreq_size,
4920                               host_ifc_buf + i * sizeof(struct ifreq),
4921                               ifreq_arg_type, THUNK_TARGET);
4922             }
4923             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4924         }
4925     }
4926 
4927     if (free_buf) {
4928         g_free(host_ifconf);
4929     }
4930 
4931     return ret;
4932 }
4933 
4934 #if defined(CONFIG_USBFS)
4935 #if HOST_LONG_BITS > 64
4936 #error USBDEVFS thunks do not support >64 bit hosts yet.
4937 #endif
4938 struct live_urb {
4939     uint64_t target_urb_adr;
4940     uint64_t target_buf_adr;
4941     char *target_buf_ptr;
4942     struct usbdevfs_urb host_urb;
4943 };
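/*
 * One live_urb wrapper is allocated per submitted URB: it keeps the guest
 * URB and buffer addresses next to the host usbdevfs_urb handed to the
 * kernel so that REAPURB/DISCARDURB can translate results back.
 */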
4944 
4945 static GHashTable *usbdevfs_urb_hashtable(void)
4946 {
4947     static GHashTable *urb_hashtable;
4948 
4949     if (!urb_hashtable) {
4950         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4951     }
4952     return urb_hashtable;
4953 }
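/*
 * URBs are keyed by guest address: target_urb_adr is the first member of
 * struct live_urb, so a pointer to the wrapper also serves as the 64-bit
 * key expected by g_int64_hash()/g_int64_equal().
 */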
4954 
4955 static void urb_hashtable_insert(struct live_urb *urb)
4956 {
4957     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4958     g_hash_table_insert(urb_hashtable, urb, urb);
4959 }
4960 
4961 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4962 {
4963     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4964     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4965 }
4966 
4967 static void urb_hashtable_remove(struct live_urb *urb)
4968 {
4969     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4970     g_hash_table_remove(urb_hashtable, urb);
4971 }
4972 
4973 static abi_long
4974 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4975                           int fd, int cmd, abi_long arg)
4976 {
4977     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4978     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4979     struct live_urb *lurb;
4980     void *argptr;
4981     uint64_t hurb;
4982     int target_size;
4983     uintptr_t target_urb_adr;
4984     abi_long ret;
4985 
4986     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4987 
4988     memset(buf_temp, 0, sizeof(uint64_t));
4989     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4990     if (is_error(ret)) {
4991         return ret;
4992     }
4993 
4994     memcpy(&hurb, buf_temp, sizeof(uint64_t));
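    /*
     * The kernel returned the address of live_urb.host_urb; step back by
     * its offset (container_of style) to recover the wrapper and the guest
     * addresses recorded at submit time.
     */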
4995     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4996     if (!lurb->target_urb_adr) {
4997         return -TARGET_EFAULT;
4998     }
4999     urb_hashtable_remove(lurb);
5000     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
5001         lurb->host_urb.buffer_length);
5002     lurb->target_buf_ptr = NULL;
5003 
5004     /* restore the guest buffer pointer */
5005     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
5006 
5007     /* update the guest urb struct */
5008     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
5009     if (!argptr) {
5010         g_free(lurb);
5011         return -TARGET_EFAULT;
5012     }
5013     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
5014     unlock_user(argptr, lurb->target_urb_adr, target_size);
5015 
5016     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
5017     /* write back the urb handle */
5018     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5019     if (!argptr) {
5020         g_free(lurb);
5021         return -TARGET_EFAULT;
5022     }
5023 
5024     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
5025     target_urb_adr = lurb->target_urb_adr;
5026     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
5027     unlock_user(argptr, arg, target_size);
5028 
5029     g_free(lurb);
5030     return ret;
5031 }
5032 
5033 static abi_long
5034 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5035                              uint8_t *buf_temp __attribute__((unused)),
5036                              int fd, int cmd, abi_long arg)
5037 {
5038     struct live_urb *lurb;
5039 
5040     /* map target address back to host URB with metadata. */
5041     lurb = urb_hashtable_lookup(arg);
5042     if (!lurb) {
5043         return -TARGET_EFAULT;
5044     }
5045     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5046 }
5047 
5048 static abi_long
5049 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
5050                             int fd, int cmd, abi_long arg)
5051 {
5052     const argtype *arg_type = ie->arg_type;
5053     int target_size;
5054     abi_long ret;
5055     void *argptr;
5056     int rw_dir;
5057     struct live_urb *lurb;
5058 
5059     /*
5060      * each submitted URB needs to map to a unique ID for the
5061      * kernel, and that unique ID needs to be a pointer to
5062      * host memory.  hence, we need to malloc for each URB.
5063      * isochronous transfers have a variable length struct.
5064      */
5065     arg_type++;
5066     target_size = thunk_type_size(arg_type, THUNK_TARGET);
5067 
5068     /* construct host copy of urb and metadata */
5069     lurb = g_try_new0(struct live_urb, 1);
5070     if (!lurb) {
5071         return -TARGET_ENOMEM;
5072     }
5073 
5074     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5075     if (!argptr) {
5076         g_free(lurb);
5077         return -TARGET_EFAULT;
5078     }
5079     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
5080     unlock_user(argptr, arg, 0);
5081 
5082     lurb->target_urb_adr = arg;
5083     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
5084 
5085     /* buffer space used depends on endpoint type so lock the entire buffer */
5086     /* control type urbs should check the buffer contents for true direction */
5087     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
5088     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
5089         lurb->host_urb.buffer_length, 1);
5090     if (lurb->target_buf_ptr == NULL) {
5091         g_free(lurb);
5092         return -TARGET_EFAULT;
5093     }
5094 
5095     /* update buffer pointer in host copy */
5096     lurb->host_urb.buffer = lurb->target_buf_ptr;
5097 
5098     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5099     if (is_error(ret)) {
5100         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
5101         g_free(lurb);
5102     } else {
5103         urb_hashtable_insert(lurb);
5104     }
5105 
5106     return ret;
5107 }
5108 #endif /* CONFIG_USBFS */
5109 
5110 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5111                             int cmd, abi_long arg)
5112 {
5113     void *argptr;
5114     struct dm_ioctl *host_dm;
5115     abi_long guest_data;
5116     uint32_t guest_data_size;
5117     int target_size;
5118     const argtype *arg_type = ie->arg_type;
5119     abi_long ret;
5120     void *big_buf = NULL;
5121     char *host_data;
5122 
5123     arg_type++;
5124     target_size = thunk_type_size(arg_type, 0);
5125     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5126     if (!argptr) {
5127         ret = -TARGET_EFAULT;
5128         goto out;
5129     }
5130     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5131     unlock_user(argptr, arg, 0);
5132 
5133     /* buf_temp is too small, so fetch things into a bigger buffer */
5134     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5135     memcpy(big_buf, buf_temp, target_size);
5136     buf_temp = big_buf;
5137     host_dm = big_buf;
5138 
5139     guest_data = arg + host_dm->data_start;
5140     if ((guest_data - arg) < 0) {
5141         ret = -TARGET_EINVAL;
5142         goto out;
5143     }
5144     guest_data_size = host_dm->data_size - host_dm->data_start;
5145     host_data = (char*)host_dm + host_dm->data_start;
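    /*
     * A dm_ioctl request is a fixed header followed by a variable payload:
     * data_start is the payload offset and data_size the total size, so the
     * guest payload sits at arg + data_start and the host copy at the same
     * offset within big_buf.
     */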
5146 
5147     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5148     if (!argptr) {
5149         ret = -TARGET_EFAULT;
5150         goto out;
5151     }
5152 
5153     switch (ie->host_cmd) {
5154     case DM_REMOVE_ALL:
5155     case DM_LIST_DEVICES:
5156     case DM_DEV_CREATE:
5157     case DM_DEV_REMOVE:
5158     case DM_DEV_SUSPEND:
5159     case DM_DEV_STATUS:
5160     case DM_DEV_WAIT:
5161     case DM_TABLE_STATUS:
5162     case DM_TABLE_CLEAR:
5163     case DM_TABLE_DEPS:
5164     case DM_LIST_VERSIONS:
5165         /* no input data */
5166         break;
5167     case DM_DEV_RENAME:
5168     case DM_DEV_SET_GEOMETRY:
5169         /* data contains only strings */
5170         memcpy(host_data, argptr, guest_data_size);
5171         break;
5172     case DM_TARGET_MSG:
5173         memcpy(host_data, argptr, guest_data_size);
5174         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5175         break;
5176     case DM_TABLE_LOAD:
5177     {
5178         void *gspec = argptr;
5179         void *cur_data = host_data;
5180         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5181         int spec_size = thunk_type_size(arg_type, 0);
5182         int i;
5183 
5184         for (i = 0; i < host_dm->target_count; i++) {
5185             struct dm_target_spec *spec = cur_data;
5186             uint32_t next;
5187             int slen;
5188 
5189             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5190             slen = strlen((char*)gspec + spec_size) + 1;
5191             next = spec->next;
5192             spec->next = sizeof(*spec) + slen;
5193             strcpy((char*)&spec[1], gspec + spec_size);
5194             gspec += next;
5195             cur_data += spec->next;
5196         }
5197         break;
5198     }
5199     default:
5200         ret = -TARGET_EINVAL;
5201         unlock_user(argptr, guest_data, 0);
5202         goto out;
5203     }
5204     unlock_user(argptr, guest_data, 0);
5205 
5206     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5207     if (!is_error(ret)) {
5208         guest_data = arg + host_dm->data_start;
5209         guest_data_size = host_dm->data_size - host_dm->data_start;
5210         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5211         switch (ie->host_cmd) {
5212         case DM_REMOVE_ALL:
5213         case DM_DEV_CREATE:
5214         case DM_DEV_REMOVE:
5215         case DM_DEV_RENAME:
5216         case DM_DEV_SUSPEND:
5217         case DM_DEV_STATUS:
5218         case DM_TABLE_LOAD:
5219         case DM_TABLE_CLEAR:
5220         case DM_TARGET_MSG:
5221         case DM_DEV_SET_GEOMETRY:
5222             /* no return data */
5223             break;
5224         case DM_LIST_DEVICES:
5225         {
5226             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5227             uint32_t remaining_data = guest_data_size;
5228             void *cur_data = argptr;
5229             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5230             int nl_size = 12; /* can't use thunk_size due to alignment */
5231 
5232             while (1) {
5233                 uint32_t next = nl->next;
5234                 if (next) {
5235                     nl->next = nl_size + (strlen(nl->name) + 1);
5236                 }
5237                 if (remaining_data < nl->next) {
5238                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5239                     break;
5240                 }
5241                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5242                 strcpy(cur_data + nl_size, nl->name);
5243                 cur_data += nl->next;
5244                 remaining_data -= nl->next;
5245                 if (!next) {
5246                     break;
5247                 }
5248                 nl = (void*)nl + next;
5249             }
5250             break;
5251         }
5252         case DM_DEV_WAIT:
5253         case DM_TABLE_STATUS:
5254         {
5255             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5256             void *cur_data = argptr;
5257             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5258             int spec_size = thunk_type_size(arg_type, 0);
5259             int i;
5260 
5261             for (i = 0; i < host_dm->target_count; i++) {
5262                 uint32_t next = spec->next;
5263                 int slen = strlen((char*)&spec[1]) + 1;
5264                 spec->next = (cur_data - argptr) + spec_size + slen;
5265                 if (guest_data_size < spec->next) {
5266                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5267                     break;
5268                 }
5269                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5270                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5271                 cur_data = argptr + spec->next;
5272                 spec = (void*)host_dm + host_dm->data_start + next;
5273             }
5274             break;
5275         }
5276         case DM_TABLE_DEPS:
5277         {
5278             void *hdata = (void*)host_dm + host_dm->data_start;
5279             int count = *(uint32_t*)hdata;
5280             uint64_t *hdev = hdata + 8;
5281             uint64_t *gdev = argptr + 8;
5282             int i;
5283 
5284             *(uint32_t*)argptr = tswap32(count);
5285             for (i = 0; i < count; i++) {
5286                 *gdev = tswap64(*hdev);
5287                 gdev++;
5288                 hdev++;
5289             }
5290             break;
5291         }
5292         case DM_LIST_VERSIONS:
5293         {
5294             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5295             uint32_t remaining_data = guest_data_size;
5296             void *cur_data = argptr;
5297             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5298             int vers_size = thunk_type_size(arg_type, 0);
5299 
5300             while (1) {
5301                 uint32_t next = vers->next;
5302                 if (next) {
5303                     vers->next = vers_size + (strlen(vers->name) + 1);
5304                 }
5305                 if (remaining_data < vers->next) {
5306                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5307                     break;
5308                 }
5309                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5310                 strcpy(cur_data + vers_size, vers->name);
5311                 cur_data += vers->next;
5312                 remaining_data -= vers->next;
5313                 if (!next) {
5314                     break;
5315                 }
5316                 vers = (void*)vers + next;
5317             }
5318             break;
5319         }
5320         default:
5321             unlock_user(argptr, guest_data, 0);
5322             ret = -TARGET_EINVAL;
5323             goto out;
5324         }
5325         unlock_user(argptr, guest_data, guest_data_size);
5326 
5327         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5328         if (!argptr) {
5329             ret = -TARGET_EFAULT;
5330             goto out;
5331         }
5332         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5333         unlock_user(argptr, arg, target_size);
5334     }
5335 out:
5336     g_free(big_buf);
5337     return ret;
5338 }
5339 
5340 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5341                                int cmd, abi_long arg)
5342 {
5343     void *argptr;
5344     int target_size;
5345     const argtype *arg_type = ie->arg_type;
5346     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5347     abi_long ret;
5348 
5349     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5350     struct blkpg_partition host_part;
5351 
5352     /* Read and convert blkpg */
5353     arg_type++;
5354     target_size = thunk_type_size(arg_type, 0);
5355     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5356     if (!argptr) {
5357         ret = -TARGET_EFAULT;
5358         goto out;
5359     }
5360     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5361     unlock_user(argptr, arg, 0);
5362 
5363     switch (host_blkpg->op) {
5364     case BLKPG_ADD_PARTITION:
5365     case BLKPG_DEL_PARTITION:
5366         /* payload is struct blkpg_partition */
5367         break;
5368     default:
5369         /* Unknown opcode */
5370         ret = -TARGET_EINVAL;
5371         goto out;
5372     }
5373 
5374     /* Read and convert blkpg->data */
5375     arg = (abi_long)(uintptr_t)host_blkpg->data;
5376     target_size = thunk_type_size(part_arg_type, 0);
5377     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5378     if (!argptr) {
5379         ret = -TARGET_EFAULT;
5380         goto out;
5381     }
5382     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5383     unlock_user(argptr, arg, 0);
5384 
5385     /* Swizzle the data pointer to our local copy and call! */
5386     host_blkpg->data = &host_part;
5387     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5388 
5389 out:
5390     return ret;
5391 }
5392 
5393 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5394                                 int fd, int cmd, abi_long arg)
5395 {
5396     const argtype *arg_type = ie->arg_type;
5397     const StructEntry *se;
5398     const argtype *field_types;
5399     const int *dst_offsets, *src_offsets;
5400     int target_size;
5401     void *argptr;
5402     abi_ulong *target_rt_dev_ptr = NULL;
5403     unsigned long *host_rt_dev_ptr = NULL;
5404     abi_long ret;
5405     int i;
5406 
5407     assert(ie->access == IOC_W);
5408     assert(*arg_type == TYPE_PTR);
5409     arg_type++;
5410     assert(*arg_type == TYPE_STRUCT);
5411     target_size = thunk_type_size(arg_type, 0);
5412     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5413     if (!argptr) {
5414         return -TARGET_EFAULT;
5415     }
5416     arg_type++;
5417     assert(*arg_type == (int)STRUCT_rtentry);
5418     se = struct_entries + *arg_type++;
5419     assert(se->convert[0] == NULL);
5420     /* convert struct here to be able to catch rt_dev string */
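    /*
     * The generic thunk would only copy the rt_dev pointer value, so the
     * struct is converted field by field and the device name, if any, is
     * locked separately as a host string.
     */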
5421     field_types = se->field_types;
5422     dst_offsets = se->field_offsets[THUNK_HOST];
5423     src_offsets = se->field_offsets[THUNK_TARGET];
5424     for (i = 0; i < se->nb_fields; i++) {
5425         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5426             assert(*field_types == TYPE_PTRVOID);
5427             target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
5428             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5429             if (*target_rt_dev_ptr != 0) {
5430                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5431                                                   tswapal(*target_rt_dev_ptr));
5432                 if (!*host_rt_dev_ptr) {
5433                     unlock_user(argptr, arg, 0);
5434                     return -TARGET_EFAULT;
5435                 }
5436             } else {
5437                 *host_rt_dev_ptr = 0;
5438             }
5439             field_types++;
5440             continue;
5441         }
5442         field_types = thunk_convert(buf_temp + dst_offsets[i],
5443                                     argptr + src_offsets[i],
5444                                     field_types, THUNK_HOST);
5445     }
5446     unlock_user(argptr, arg, 0);
5447 
5448     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5449 
5450     assert(host_rt_dev_ptr != NULL);
5451     assert(target_rt_dev_ptr != NULL);
5452     if (*host_rt_dev_ptr != 0) {
5453         unlock_user((void *)*host_rt_dev_ptr,
5454                     *target_rt_dev_ptr, 0);
5455     }
5456     return ret;
5457 }
5458 
5459 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5460                                      int fd, int cmd, abi_long arg)
5461 {
5462     int sig = target_to_host_signal(arg);
5463     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5464 }
5465 
5466 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5467                                     int fd, int cmd, abi_long arg)
5468 {
5469     struct timeval tv;
5470     abi_long ret;
5471 
5472     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5473     if (is_error(ret)) {
5474         return ret;
5475     }
5476 
5477     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5478         if (copy_to_user_timeval(arg, &tv)) {
5479             return -TARGET_EFAULT;
5480         }
5481     } else {
5482         if (copy_to_user_timeval64(arg, &tv)) {
5483             return -TARGET_EFAULT;
5484         }
5485     }
5486 
5487     return ret;
5488 }
5489 
5490 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5491                                       int fd, int cmd, abi_long arg)
5492 {
5493     struct timespec ts;
5494     abi_long ret;
5495 
5496     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5497     if (is_error(ret)) {
5498         return ret;
5499     }
5500 
5501     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5502         if (host_to_target_timespec(arg, &ts)) {
5503             return -TARGET_EFAULT;
5504         }
5505     } else {
5506         if (host_to_target_timespec64(arg, &ts)) {
5507             return -TARGET_EFAULT;
5508         }
5509     }
5510 
5511     return ret;
5512 }
5513 
5514 #ifdef TIOCGPTPEER
5515 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5516                                      int fd, int cmd, abi_long arg)
5517 {
5518     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5519     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5520 }
5521 #endif
5522 
5523 #ifdef HAVE_DRM_H
5524 
5525 static void unlock_drm_version(struct drm_version *host_ver,
5526                                struct target_drm_version *target_ver,
5527                                bool copy)
5528 {
5529     unlock_user(host_ver->name, target_ver->name,
5530                                 copy ? host_ver->name_len : 0);
5531     unlock_user(host_ver->date, target_ver->date,
5532                                 copy ? host_ver->date_len : 0);
5533     unlock_user(host_ver->desc, target_ver->desc,
5534                                 copy ? host_ver->desc_len : 0);
5535 }
5536 
5537 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5538                                           struct target_drm_version *target_ver)
5539 {
5540     memset(host_ver, 0, sizeof(*host_ver));
5541 
5542     __get_user(host_ver->name_len, &target_ver->name_len);
5543     if (host_ver->name_len) {
5544         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5545                                    target_ver->name_len, 0);
5546         if (!host_ver->name) {
5547             return -EFAULT;
5548         }
5549     }
5550 
5551     __get_user(host_ver->date_len, &target_ver->date_len);
5552     if (host_ver->date_len) {
5553         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5554                                    target_ver->date_len, 0);
5555         if (!host_ver->date) {
5556             goto err;
5557         }
5558     }
5559 
5560     __get_user(host_ver->desc_len, &target_ver->desc_len);
5561     if (host_ver->desc_len) {
5562         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5563                                    target_ver->desc_len, 0);
5564         if (!host_ver->desc) {
5565             goto err;
5566         }
5567     }
5568 
5569     return 0;
5570 err:
5571     unlock_drm_version(host_ver, target_ver, false);
5572     return -EFAULT;
5573 }
5574 
5575 static inline void host_to_target_drmversion(
5576                                           struct target_drm_version *target_ver,
5577                                           struct drm_version *host_ver)
5578 {
5579     __put_user(host_ver->version_major, &target_ver->version_major);
5580     __put_user(host_ver->version_minor, &target_ver->version_minor);
5581     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5582     __put_user(host_ver->name_len, &target_ver->name_len);
5583     __put_user(host_ver->date_len, &target_ver->date_len);
5584     __put_user(host_ver->desc_len, &target_ver->desc_len);
5585     unlock_drm_version(host_ver, target_ver, true);
5586 }
5587 
5588 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5589                              int fd, int cmd, abi_long arg)
5590 {
5591     struct drm_version *ver;
5592     struct target_drm_version *target_ver;
5593     abi_long ret;
5594 
5595     switch (ie->host_cmd) {
5596     case DRM_IOCTL_VERSION:
5597         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5598             return -TARGET_EFAULT;
5599         }
5600         ver = (struct drm_version *)buf_temp;
5601         ret = target_to_host_drmversion(ver, target_ver);
5602         if (!is_error(ret)) {
5603             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5604             if (is_error(ret)) {
5605                 unlock_drm_version(ver, target_ver, false);
5606             } else {
5607                 host_to_target_drmversion(target_ver, ver);
5608             }
5609         }
5610         unlock_user_struct(target_ver, arg, 0);
5611         return ret;
5612     }
5613     return -TARGET_ENOSYS;
5614 }
5615 
5616 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5617                                            struct drm_i915_getparam *gparam,
5618                                            int fd, abi_long arg)
5619 {
5620     abi_long ret;
5621     int value;
5622     struct target_drm_i915_getparam *target_gparam;
5623 
5624     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5625         return -TARGET_EFAULT;
5626     }
5627 
5628     __get_user(gparam->param, &target_gparam->param);
5629     gparam->value = &value;
5630     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5631     put_user_s32(value, target_gparam->value);
5632 
5633     unlock_user_struct(target_gparam, arg, 0);
5634     return ret;
5635 }
5636 
5637 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5638                                   int fd, int cmd, abi_long arg)
5639 {
5640     switch (ie->host_cmd) {
5641     case DRM_IOCTL_I915_GETPARAM:
5642         return do_ioctl_drm_i915_getparam(ie,
5643                                           (struct drm_i915_getparam *)buf_temp,
5644                                           fd, arg);
5645     default:
5646         return -TARGET_ENOSYS;
5647     }
5648 }
5649 
5650 #endif
5651 
5652 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5653                                         int fd, int cmd, abi_long arg)
5654 {
5655     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5656     struct tun_filter *target_filter;
5657     char *target_addr;
5658 
5659     assert(ie->access == IOC_W);
5660 
5661     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5662     if (!target_filter) {
5663         return -TARGET_EFAULT;
5664     }
5665     filter->flags = tswap16(target_filter->flags);
5666     filter->count = tswap16(target_filter->count);
5667     unlock_user(target_filter, arg, 0);
5668 
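    /*
     * The tun_filter header is followed by a variable-length array of
     * 'count' MAC addresses; copy it separately once it is known to fit in
     * the fixed-size bounce buffer.
     */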
5669     if (filter->count) {
5670         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5671             MAX_STRUCT_SIZE) {
5672             return -TARGET_EFAULT;
5673         }
5674 
5675         target_addr = lock_user(VERIFY_READ,
5676                                 arg + offsetof(struct tun_filter, addr),
5677                                 filter->count * ETH_ALEN, 1);
5678         if (!target_addr) {
5679             return -TARGET_EFAULT;
5680         }
5681         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5682         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5683     }
5684 
5685     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5686 }
5687 
5688 IOCTLEntry ioctl_entries[] = {
5689 #define IOCTL(cmd, access, ...) \
5690     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5691 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5692     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5693 #define IOCTL_IGNORE(cmd) \
5694     { TARGET_ ## cmd, 0, #cmd },
5695 #include "ioctls.h"
5696     { 0, 0, },
5697 };
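/*
 * The table above is generated from ioctls.h: each entry pairs a target
 * ioctl number with its host counterpart, an access mode and an argument
 * type description (or a special handler), and the zero target_cmd entry
 * terminates the lookup loop in do_ioctl().
 */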
5698 
5699 /* ??? Implement proper locking for ioctls.  */
5700 /* do_ioctl() must return target values and target errnos. */
5701 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5702 {
5703     const IOCTLEntry *ie;
5704     const argtype *arg_type;
5705     abi_long ret;
5706     uint8_t buf_temp[MAX_STRUCT_SIZE];
5707     int target_size;
5708     void *argptr;
5709 
5710     ie = ioctl_entries;
5711     for(;;) {
5712         if (ie->target_cmd == 0) {
5713             qemu_log_mask(
5714                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5715             return -TARGET_ENOSYS;
5716         }
5717         if (ie->target_cmd == cmd)
5718             break;
5719         ie++;
5720     }
5721     arg_type = ie->arg_type;
5722     if (ie->do_ioctl) {
5723         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5724     } else if (!ie->host_cmd) {
5725         /* Some architectures define BSD ioctls in their headers
5726            that are not implemented in Linux.  */
5727         return -TARGET_ENOSYS;
5728     }
5729 
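    /*
     * Generic marshalling path: IOC_W arguments are converted into buf_temp
     * before the ioctl, IOC_R results are converted back to the guest
     * afterwards, and IOC_RW does both.
     */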
5730     switch(arg_type[0]) {
5731     case TYPE_NULL:
5732         /* no argument */
5733         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5734         break;
5735     case TYPE_PTRVOID:
5736     case TYPE_INT:
5737     case TYPE_LONG:
5738     case TYPE_ULONG:
5739         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5740         break;
5741     case TYPE_PTR:
5742         arg_type++;
5743         target_size = thunk_type_size(arg_type, 0);
5744         switch(ie->access) {
5745         case IOC_R:
5746             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5747             if (!is_error(ret)) {
5748                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5749                 if (!argptr)
5750                     return -TARGET_EFAULT;
5751                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5752                 unlock_user(argptr, arg, target_size);
5753             }
5754             break;
5755         case IOC_W:
5756             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5757             if (!argptr)
5758                 return -TARGET_EFAULT;
5759             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5760             unlock_user(argptr, arg, 0);
5761             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5762             break;
5763         default:
5764         case IOC_RW:
5765             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5766             if (!argptr)
5767                 return -TARGET_EFAULT;
5768             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5769             unlock_user(argptr, arg, 0);
5770             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5771             if (!is_error(ret)) {
5772                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5773                 if (!argptr)
5774                     return -TARGET_EFAULT;
5775                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5776                 unlock_user(argptr, arg, target_size);
5777             }
5778             break;
5779         }
5780         break;
5781     default:
5782         qemu_log_mask(LOG_UNIMP,
5783                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5784                       (long)cmd, arg_type[0]);
5785         ret = -TARGET_ENOSYS;
5786         break;
5787     }
5788     return ret;
5789 }
5790 
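/*
 * Each bitmask_transtbl row is { target_mask, target_bits, host_mask,
 * host_bits } (cf. { TARGET_CBAUD, TARGET_B50, CBAUD, B50 } below);
 * target_to_host_bitmask() and host_to_target_bitmask() walk these tables
 * to translate flag words field by field.
 */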
5791 static const bitmask_transtbl iflag_tbl[] = {
5792         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5793         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5794         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5795         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5796         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5797         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5798         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5799         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5800         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5801         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5802         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5803         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5804         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5805         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5806         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5807         { 0, 0, 0, 0 }
5808 };
5809 
5810 static const bitmask_transtbl oflag_tbl[] = {
5811         { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5812         { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5813         { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5814         { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5815         { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5816         { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5817         { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5818         { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5819         { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5820         { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5821         { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5822         { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5823         { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5824         { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5825         { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5826         { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5827         { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5828         { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5829         { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5830         { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5831         { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5832         { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5833         { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5834         { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5835         { 0, 0, 0, 0 }
5836 };
5837 
5838 static const bitmask_transtbl cflag_tbl[] = {
5839         { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5840         { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5841         { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5842         { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5843         { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5844         { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5845         { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5846         { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5847         { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5848         { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5849         { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5850         { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5851         { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5852         { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5853         { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5854         { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5855         { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5856         { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5857         { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5858         { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5859         { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5860         { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5861         { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5862         { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5863         { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5864         { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5865         { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5866         { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5867         { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5868         { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5869         { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5870         { 0, 0, 0, 0 }
5871 };
5872 
5873 static const bitmask_transtbl lflag_tbl[] = {
5874   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5875   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5876   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5877   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5878   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5879   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5880   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5881   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5882   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5883   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5884   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5885   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5886   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5887   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5888   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5889   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5890   { 0, 0, 0, 0 }
5891 };
5892 
5893 static void target_to_host_termios (void *dst, const void *src)
5894 {
5895     struct host_termios *host = dst;
5896     const struct target_termios *target = src;
5897 
5898     host->c_iflag =
5899         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5900     host->c_oflag =
5901         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5902     host->c_cflag =
5903         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5904     host->c_lflag =
5905         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5906     host->c_line = target->c_line;
5907 
5908     memset(host->c_cc, 0, sizeof(host->c_cc));
5909     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5910     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5911     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5912     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5913     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5914     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5915     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5916     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5917     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5918     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5919     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5920     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5921     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5922     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5923     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5924     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5925     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5926 }
5927 
5928 static void host_to_target_termios (void *dst, const void *src)
5929 {
5930     struct target_termios *target = dst;
5931     const struct host_termios *host = src;
5932 
5933     target->c_iflag =
5934         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5935     target->c_oflag =
5936         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5937     target->c_cflag =
5938         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5939     target->c_lflag =
5940         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5941     target->c_line = host->c_line;
5942 
5943     memset(target->c_cc, 0, sizeof(target->c_cc));
5944     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5945     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5946     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5947     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5948     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5949     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5950     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5951     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5952     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5953     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5954     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5955     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5956     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5957     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5958     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5959     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5960     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5961 }
5962 
5963 static const StructEntry struct_termios_def = {
5964     .convert = { host_to_target_termios, target_to_host_termios },
5965     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5966     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5967     .print = print_termios,
5968 };
5969 
5970 static const bitmask_transtbl mmap_flags_tbl[] = {
5971     { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
5972     { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
5973     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5974     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5975       MAP_ANONYMOUS, MAP_ANONYMOUS },
5976     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5977       MAP_GROWSDOWN, MAP_GROWSDOWN },
5978     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5979       MAP_DENYWRITE, MAP_DENYWRITE },
5980     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5981       MAP_EXECUTABLE, MAP_EXECUTABLE },
5982     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5983     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5984       MAP_NORESERVE, MAP_NORESERVE },
5985     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5986     /* MAP_STACK had been ignored by the kernel for quite some time.
5987        Recognize it for the target insofar as we do not want to pass
5988        it through to the host.  */
5989     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5990     { 0, 0, 0, 0 }
5991 };
5992 
5993 /*
5994  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5995  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5996  */
5997 #if defined(TARGET_I386)
5998 
5999 /* NOTE: there is really one LDT for all the threads */
6000 static uint8_t *ldt_table;
6001 
6002 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6003 {
6004     int size;
6005     void *p;
6006 
6007     if (!ldt_table)
6008         return 0;
6009     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6010     if (size > bytecount)
6011         size = bytecount;
6012     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6013     if (!p)
6014         return -TARGET_EFAULT;
6015     /* ??? Should this be byteswapped?  */
6016     memcpy(p, ldt_table, size);
6017     unlock_user(p, ptr, size);
6018     return size;
6019 }
6020 
6021 /* XXX: add locking support */
6022 static abi_long write_ldt(CPUX86State *env,
6023                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6024 {
6025     struct target_modify_ldt_ldt_s ldt_info;
6026     struct target_modify_ldt_ldt_s *target_ldt_info;
6027     int seg_32bit, contents, read_exec_only, limit_in_pages;
6028     int seg_not_present, useable, lm;
6029     uint32_t *lp, entry_1, entry_2;
6030 
6031     if (bytecount != sizeof(ldt_info))
6032         return -TARGET_EINVAL;
6033     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6034         return -TARGET_EFAULT;
6035     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6036     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6037     ldt_info.limit = tswap32(target_ldt_info->limit);
6038     ldt_info.flags = tswap32(target_ldt_info->flags);
6039     unlock_user_struct(target_ldt_info, ptr, 0);
6040 
6041     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6042         return -TARGET_EINVAL;
6043     seg_32bit = ldt_info.flags & 1;
6044     contents = (ldt_info.flags >> 1) & 3;
6045     read_exec_only = (ldt_info.flags >> 3) & 1;
6046     limit_in_pages = (ldt_info.flags >> 4) & 1;
6047     seg_not_present = (ldt_info.flags >> 5) & 1;
6048     useable = (ldt_info.flags >> 6) & 1;
6049 #ifdef TARGET_ABI32
6050     lm = 0;
6051 #else
6052     lm = (ldt_info.flags >> 7) & 1;
6053 #endif
6054     if (contents == 3) {
6055         if (oldmode)
6056             return -TARGET_EINVAL;
6057         if (seg_not_present == 0)
6058             return -TARGET_EINVAL;
6059     }
6060     /* allocate the LDT */
6061     if (!ldt_table) {
6062         env->ldt.base = target_mmap(0,
6063                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6064                                     PROT_READ|PROT_WRITE,
6065                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6066         if (env->ldt.base == -1)
6067             return -TARGET_ENOMEM;
6068         memset(g2h_untagged(env->ldt.base), 0,
6069                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6070         env->ldt.limit = 0xffff;
6071         ldt_table = g2h_untagged(env->ldt.base);
6072     }
6073 
6074     /* NOTE: same code as Linux kernel */
6075     /* Allow LDTs to be cleared by the user. */
6076     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6077         if (oldmode ||
6078             (contents == 0             &&
6079              read_exec_only == 1       &&
6080              seg_32bit == 0            &&
6081              limit_in_pages == 0       &&
6082              seg_not_present == 1      &&
6083              useable == 0 )) {
6084             entry_1 = 0;
6085             entry_2 = 0;
6086             goto install;
6087         }
6088     }
6089 
6090     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6091         (ldt_info.limit & 0x0ffff);
6092     entry_2 = (ldt_info.base_addr & 0xff000000) |
6093         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6094         (ldt_info.limit & 0xf0000) |
6095         ((read_exec_only ^ 1) << 9) |
6096         (contents << 10) |
6097         ((seg_not_present ^ 1) << 15) |
6098         (seg_32bit << 22) |
6099         (limit_in_pages << 23) |
6100         (lm << 21) |
6101         0x7000;
6102     if (!oldmode)
6103         entry_2 |= (useable << 20);
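    /*
     * entry_1/entry_2 are the two 32-bit words of an x86 segment descriptor.
     * As an illustration, base_addr 0x12345678 with limit 0xabcde yields
     * entry_1 = 0x5678bcde and entry_2 = 0x120a0034 plus the flag bits
     * assembled above.
     */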
6104 
6105     /* Install the new entry ...  */
6106 install:
6107     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6108     lp[0] = tswap32(entry_1);
6109     lp[1] = tswap32(entry_2);
6110     return 0;
6111 }
6112 
6113 /* specific and weird i386 syscalls */
6114 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6115                               unsigned long bytecount)
6116 {
6117     abi_long ret;
6118 
6119     switch (func) {
6120     case 0:
6121         ret = read_ldt(ptr, bytecount);
6122         break;
6123     case 1:
6124         ret = write_ldt(env, ptr, bytecount, 1);
6125         break;
6126     case 0x11:
6127         ret = write_ldt(env, ptr, bytecount, 0);
6128         break;
6129     default:
6130         ret = -TARGET_ENOSYS;
6131         break;
6132     }
6133     return ret;
6134 }
6135 
6136 #if defined(TARGET_ABI32)
6137 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6138 {
6139     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6140     struct target_modify_ldt_ldt_s ldt_info;
6141     struct target_modify_ldt_ldt_s *target_ldt_info;
6142     int seg_32bit, contents, read_exec_only, limit_in_pages;
6143     int seg_not_present, useable, lm;
6144     uint32_t *lp, entry_1, entry_2;
6145     int i;
6146 
6147     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6148     if (!target_ldt_info)
6149         return -TARGET_EFAULT;
6150     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6151     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6152     ldt_info.limit = tswap32(target_ldt_info->limit);
6153     ldt_info.flags = tswap32(target_ldt_info->flags);
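    /*
     * As with the kernel's set_thread_area(): an entry_number of -1 asks us
     * to allocate a free TLS slot in the GDT and report the chosen index
     * back through the user structure.
     */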
6154     if (ldt_info.entry_number == -1) {
6155         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6156             if (gdt_table[i] == 0) {
6157                 ldt_info.entry_number = i;
6158                 target_ldt_info->entry_number = tswap32(i);
6159                 break;
6160             }
6161         }
6162     }
6163     unlock_user_struct(target_ldt_info, ptr, 1);
6164 
6165     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6166         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6167            return -TARGET_EINVAL;
6168     seg_32bit = ldt_info.flags & 1;
6169     contents = (ldt_info.flags >> 1) & 3;
6170     read_exec_only = (ldt_info.flags >> 3) & 1;
6171     limit_in_pages = (ldt_info.flags >> 4) & 1;
6172     seg_not_present = (ldt_info.flags >> 5) & 1;
6173     useable = (ldt_info.flags >> 6) & 1;
6174 #ifdef TARGET_ABI32
6175     lm = 0;
6176 #else
6177     lm = (ldt_info.flags >> 7) & 1;
6178 #endif
6179 
6180     if (contents == 3) {
6181         if (seg_not_present == 0)
6182             return -TARGET_EINVAL;
6183     }
6184 
6185     /* NOTE: same code as Linux kernel */
6186     /* Allow LDTs to be cleared by the user. */
6187     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6188         if ((contents == 0             &&
6189              read_exec_only == 1       &&
6190              seg_32bit == 0            &&
6191              limit_in_pages == 0       &&
6192              seg_not_present == 1      &&
6193              useable == 0 )) {
6194             entry_1 = 0;
6195             entry_2 = 0;
6196             goto install;
6197         }
6198     }
6199 
6200     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6201         (ldt_info.limit & 0x0ffff);
6202     entry_2 = (ldt_info.base_addr & 0xff000000) |
6203         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6204         (ldt_info.limit & 0xf0000) |
6205         ((read_exec_only ^ 1) << 9) |
6206         (contents << 10) |
6207         ((seg_not_present ^ 1) << 15) |
6208         (seg_32bit << 22) |
6209         (limit_in_pages << 23) |
6210         (useable << 20) |
6211         (lm << 21) |
6212         0x7000;
6213 
6214     /* Install the new entry ...  */
6215 install:
6216     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6217     lp[0] = tswap32(entry_1);
6218     lp[1] = tswap32(entry_2);
6219     return 0;
6220 }
6221 
6222 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6223 {
6224     struct target_modify_ldt_ldt_s *target_ldt_info;
6225     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6226     uint32_t base_addr, limit, flags;
6227     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6228     int seg_not_present, useable, lm;
6229     uint32_t *lp, entry_1, entry_2;
6230 
6231     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6232     if (!target_ldt_info)
6233         return -TARGET_EFAULT;
6234     idx = tswap32(target_ldt_info->entry_number);
6235     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6236         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6237         unlock_user_struct(target_ldt_info, ptr, 1);
6238         return -TARGET_EINVAL;
6239     }
6240     lp = (uint32_t *)(gdt_table + idx);
6241     entry_1 = tswap32(lp[0]);
6242     entry_2 = tswap32(lp[1]);
6243 
6244     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6245     contents = (entry_2 >> 10) & 3;
6246     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6247     seg_32bit = (entry_2 >> 22) & 1;
6248     limit_in_pages = (entry_2 >> 23) & 1;
6249     useable = (entry_2 >> 20) & 1;
6250 #ifdef TARGET_ABI32
6251     lm = 0;
6252 #else
6253     lm = (entry_2 >> 21) & 1;
6254 #endif
6255     flags = (seg_32bit << 0) | (contents << 1) |
6256         (read_exec_only << 3) | (limit_in_pages << 4) |
6257         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6258     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6259     base_addr = (entry_1 >> 16) |
6260         (entry_2 & 0xff000000) |
6261         ((entry_2 & 0xff) << 16);
6262     target_ldt_info->base_addr = tswapal(base_addr);
6263     target_ldt_info->limit = tswap32(limit);
6264     target_ldt_info->flags = tswap32(flags);
6265     unlock_user_struct(target_ldt_info, ptr, 1);
6266     return 0;
6267 }
6268 
6269 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6270 {
6271     return -TARGET_ENOSYS;
6272 }
6273 #else
6274 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6275 {
6276     abi_long ret = 0;
6277     abi_ulong val;
6278     int idx;
6279 
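    /*
     * ARCH_SET_FS/ARCH_SET_GS are emulated by loading a null selector and
     * writing the requested base straight into the cached segment state;
     * ARCH_GET_FS/ARCH_GET_GS copy the cached base back out to the
     * guest-supplied pointer.
     */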
6280     switch(code) {
6281     case TARGET_ARCH_SET_GS:
6282     case TARGET_ARCH_SET_FS:
6283         if (code == TARGET_ARCH_SET_GS)
6284             idx = R_GS;
6285         else
6286             idx = R_FS;
6287         cpu_x86_load_seg(env, idx, 0);
6288         env->segs[idx].base = addr;
6289         break;
6290     case TARGET_ARCH_GET_GS:
6291     case TARGET_ARCH_GET_FS:
6292         if (code == TARGET_ARCH_GET_GS)
6293             idx = R_GS;
6294         else
6295             idx = R_FS;
6296         val = env->segs[idx].base;
6297         if (put_user(val, addr, abi_ulong))
6298             ret = -TARGET_EFAULT;
6299         break;
6300     default:
6301         ret = -TARGET_EINVAL;
6302         break;
6303     }
6304     return ret;
6305 }
6306 #endif /* defined(TARGET_ABI32) */
6307 #endif /* defined(TARGET_I386) */
6308 
6309 /*
6310  * These constants are generic.  Supply any that are missing from the host.
6311  */
6312 #ifndef PR_SET_NAME
6313 # define PR_SET_NAME    15
6314 # define PR_GET_NAME    16
6315 #endif
6316 #ifndef PR_SET_FP_MODE
6317 # define PR_SET_FP_MODE 45
6318 # define PR_GET_FP_MODE 46
6319 # define PR_FP_MODE_FR   (1 << 0)
6320 # define PR_FP_MODE_FRE  (1 << 1)
6321 #endif
6322 #ifndef PR_SVE_SET_VL
6323 # define PR_SVE_SET_VL  50
6324 # define PR_SVE_GET_VL  51
6325 # define PR_SVE_VL_LEN_MASK  0xffff
6326 # define PR_SVE_VL_INHERIT   (1 << 17)
6327 #endif
6328 #ifndef PR_PAC_RESET_KEYS
6329 # define PR_PAC_RESET_KEYS  54
6330 # define PR_PAC_APIAKEY   (1 << 0)
6331 # define PR_PAC_APIBKEY   (1 << 1)
6332 # define PR_PAC_APDAKEY   (1 << 2)
6333 # define PR_PAC_APDBKEY   (1 << 3)
6334 # define PR_PAC_APGAKEY   (1 << 4)
6335 #endif
6336 #ifndef PR_SET_TAGGED_ADDR_CTRL
6337 # define PR_SET_TAGGED_ADDR_CTRL 55
6338 # define PR_GET_TAGGED_ADDR_CTRL 56
6339 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6340 #endif
6341 #ifndef PR_MTE_TCF_SHIFT
6342 # define PR_MTE_TCF_SHIFT       1
6343 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6344 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6345 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6346 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6347 # define PR_MTE_TAG_SHIFT       3
6348 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6349 #endif
6350 #ifndef PR_SET_IO_FLUSHER
6351 # define PR_SET_IO_FLUSHER 57
6352 # define PR_GET_IO_FLUSHER 58
6353 #endif
6354 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6355 # define PR_SET_SYSCALL_USER_DISPATCH 59
6356 #endif
6357 
6358 #include "target_prctl.h"
6359 
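/*
 * target_prctl.h may provide per-target implementations of the do_prctl_*
 * hooks used below; any hook a target does not define falls back to one of
 * the do_prctl_inval* stubs, which report the option as unsupported.
 */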
6360 static abi_long do_prctl_inval0(CPUArchState *env)
6361 {
6362     return -TARGET_EINVAL;
6363 }
6364 
6365 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6366 {
6367     return -TARGET_EINVAL;
6368 }
6369 
6370 #ifndef do_prctl_get_fp_mode
6371 #define do_prctl_get_fp_mode do_prctl_inval0
6372 #endif
6373 #ifndef do_prctl_set_fp_mode
6374 #define do_prctl_set_fp_mode do_prctl_inval1
6375 #endif
6376 #ifndef do_prctl_get_vl
6377 #define do_prctl_get_vl do_prctl_inval0
6378 #endif
6379 #ifndef do_prctl_set_vl
6380 #define do_prctl_set_vl do_prctl_inval1
6381 #endif
6382 #ifndef do_prctl_reset_keys
6383 #define do_prctl_reset_keys do_prctl_inval1
6384 #endif
6385 #ifndef do_prctl_set_tagged_addr_ctrl
6386 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6387 #endif
6388 #ifndef do_prctl_get_tagged_addr_ctrl
6389 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6390 #endif
6391 #ifndef do_prctl_get_unalign
6392 #define do_prctl_get_unalign do_prctl_inval1
6393 #endif
6394 #ifndef do_prctl_set_unalign
6395 #define do_prctl_set_unalign do_prctl_inval1
6396 #endif
6397 
6398 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6399                          abi_long arg3, abi_long arg4, abi_long arg5)
6400 {
6401     abi_long ret;
6402 
6403     switch (option) {
6404     case PR_GET_PDEATHSIG:
6405         {
6406             int deathsig;
6407             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6408                                   arg3, arg4, arg5));
6409             if (!is_error(ret) &&
6410                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6411                 return -TARGET_EFAULT;
6412             }
6413             return ret;
6414         }
6415     case PR_SET_PDEATHSIG:
6416         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6417                                arg3, arg4, arg5));
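    /*
     * PR_GET_NAME and PR_SET_NAME operate on the kernel's 16-byte task comm
     * field (TASK_COMM_LEN, including the trailing NUL), which is why a
     * fixed 16-byte guest buffer is locked below.
     */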
6418     case PR_GET_NAME:
6419         {
6420             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6421             if (!name) {
6422                 return -TARGET_EFAULT;
6423             }
6424             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6425                                   arg3, arg4, arg5));
6426             unlock_user(name, arg2, 16);
6427             return ret;
6428         }
6429     case PR_SET_NAME:
6430         {
6431             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6432             if (!name) {
6433                 return -TARGET_EFAULT;
6434             }
6435             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6436                                   arg3, arg4, arg5));
6437             unlock_user(name, arg2, 0);
6438             return ret;
6439         }
6440     case PR_GET_FP_MODE:
6441         return do_prctl_get_fp_mode(env);
6442     case PR_SET_FP_MODE:
6443         return do_prctl_set_fp_mode(env, arg2);
6444     case PR_SVE_GET_VL:
6445         return do_prctl_get_vl(env);
6446     case PR_SVE_SET_VL:
6447         return do_prctl_set_vl(env, arg2);
6448     case PR_PAC_RESET_KEYS:
6449         if (arg3 || arg4 || arg5) {
6450             return -TARGET_EINVAL;
6451         }
6452         return do_prctl_reset_keys(env, arg2);
6453     case PR_SET_TAGGED_ADDR_CTRL:
6454         if (arg3 || arg4 || arg5) {
6455             return -TARGET_EINVAL;
6456         }
6457         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6458     case PR_GET_TAGGED_ADDR_CTRL:
6459         if (arg2 || arg3 || arg4 || arg5) {
6460             return -TARGET_EINVAL;
6461         }
6462         return do_prctl_get_tagged_addr_ctrl(env);
6463 
6464     case PR_GET_UNALIGN:
6465         return do_prctl_get_unalign(env, arg2);
6466     case PR_SET_UNALIGN:
6467         return do_prctl_set_unalign(env, arg2);
6468 
6469     case PR_CAP_AMBIENT:
6470     case PR_CAPBSET_READ:
6471     case PR_CAPBSET_DROP:
6472     case PR_GET_DUMPABLE:
6473     case PR_SET_DUMPABLE:
6474     case PR_GET_KEEPCAPS:
6475     case PR_SET_KEEPCAPS:
6476     case PR_GET_SECUREBITS:
6477     case PR_SET_SECUREBITS:
6478     case PR_GET_TIMING:
6479     case PR_SET_TIMING:
6480     case PR_GET_TIMERSLACK:
6481     case PR_SET_TIMERSLACK:
6482     case PR_MCE_KILL:
6483     case PR_MCE_KILL_GET:
6484     case PR_GET_NO_NEW_PRIVS:
6485     case PR_SET_NO_NEW_PRIVS:
6486     case PR_GET_IO_FLUSHER:
6487     case PR_SET_IO_FLUSHER:
6488         /* These options take no pointer arguments, so we can pass them through to the host. */
6489         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6490 
6491     case PR_GET_CHILD_SUBREAPER:
6492     case PR_SET_CHILD_SUBREAPER:
6493     case PR_GET_SPECULATION_CTRL:
6494     case PR_SET_SPECULATION_CTRL:
6495     case PR_GET_TID_ADDRESS:
6496         /* TODO */
6497         return -TARGET_EINVAL;
6498 
6499     case PR_GET_FPEXC:
6500     case PR_SET_FPEXC:
6501         /* Was used for SPE on PowerPC. */
6502         return -TARGET_EINVAL;
6503 
6504     case PR_GET_ENDIAN:
6505     case PR_SET_ENDIAN:
6506     case PR_GET_FPEMU:
6507     case PR_SET_FPEMU:
6508     case PR_SET_MM:
6509     case PR_GET_SECCOMP:
6510     case PR_SET_SECCOMP:
6511     case PR_SET_SYSCALL_USER_DISPATCH:
6512     case PR_GET_THP_DISABLE:
6513     case PR_SET_THP_DISABLE:
6514     case PR_GET_TSC:
6515     case PR_SET_TSC:
6516         /* Refuse these so the guest cannot disable features that QEMU itself needs. */
6517         return -TARGET_EINVAL;
6518 
6519     default:
6520         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6521                       option);
6522         return -TARGET_EINVAL;
6523     }
6524 }
6525 
6526 #define NEW_STACK_SIZE 0x40000
6527 
6528 
6529 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6530 typedef struct {
6531     CPUArchState *env;
6532     pthread_mutex_t mutex;
6533     pthread_cond_t cond;
6534     pthread_t thread;
6535     uint32_t tid;
6536     abi_ulong child_tidptr;
6537     abi_ulong parent_tidptr;
6538     sigset_t sigmask;
6539 } new_thread_info;
6540 
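/*
 * Start routine for threads created via clone() with CLONE_VM (see do_fork()
 * below).  The parent creates us while holding clone_lock and with all
 * signals blocked; we publish our TID, signal readiness on info->cond, and
 * then briefly take clone_lock ourselves so that we do not enter cpu_loop()
 * until the parent has finished setting up our state.
 */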
6541 static void *clone_func(void *arg)
6542 {
6543     new_thread_info *info = arg;
6544     CPUArchState *env;
6545     CPUState *cpu;
6546     TaskState *ts;
6547 
6548     rcu_register_thread();
6549     tcg_register_thread();
6550     env = info->env;
6551     cpu = env_cpu(env);
6552     thread_cpu = cpu;
6553     ts = (TaskState *)cpu->opaque;
6554     info->tid = sys_gettid();
6555     task_settid(ts);
6556     if (info->child_tidptr)
6557         put_user_u32(info->tid, info->child_tidptr);
6558     if (info->parent_tidptr)
6559         put_user_u32(info->tid, info->parent_tidptr);
6560     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6561     /* Enable signals.  */
6562     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6563     /* Signal to the parent that we're ready.  */
6564     pthread_mutex_lock(&info->mutex);
6565     pthread_cond_broadcast(&info->cond);
6566     pthread_mutex_unlock(&info->mutex);
6567     /* Wait until the parent has finished initializing the tls state.  */
6568     pthread_mutex_lock(&clone_lock);
6569     pthread_mutex_unlock(&clone_lock);
6570     cpu_loop(env);
6571     /* never exits */
6572     return NULL;
6573 }
6574 
6575 /* do_fork() must return host values and target errnos (unlike most
6576    do_*() functions). */
6577 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6578                    abi_ulong parent_tidptr, target_ulong newtls,
6579                    abi_ulong child_tidptr)
6580 {
6581     CPUState *cpu = env_cpu(env);
6582     int ret;
6583     TaskState *ts;
6584     CPUState *new_cpu;
6585     CPUArchState *new_env;
6586     sigset_t sigmask;
6587 
6588     flags &= ~CLONE_IGNORED_FLAGS;
6589 
6590     /* Emulate vfork() with fork() */
6591     if (flags & CLONE_VFORK)
6592         flags &= ~(CLONE_VFORK | CLONE_VM);
6593 
6594     if (flags & CLONE_VM) {
6595         TaskState *parent_ts = (TaskState *)cpu->opaque;
6596         new_thread_info info;
6597         pthread_attr_t attr;
6598 
6599         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6600             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6601             return -TARGET_EINVAL;
6602         }
6603 
6604         ts = g_new0(TaskState, 1);
6605         init_task_state(ts);
6606 
6607         /* Grab a mutex so that thread setup appears atomic.  */
6608         pthread_mutex_lock(&clone_lock);
6609 
6610         /*
6611          * If this is our first additional thread, we need to ensure we
6612          * generate code for parallel execution and flush old translations.
6613          * Do this now so that the copy gets CF_PARALLEL too.
6614          */
6615         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6616             cpu->tcg_cflags |= CF_PARALLEL;
6617             tb_flush(cpu);
6618         }
6619 
6620         /* we create a new CPU instance. */
6621         new_env = cpu_copy(env);
6622         /* Init regs that differ from the parent.  */
6623         cpu_clone_regs_child(new_env, newsp, flags);
6624         cpu_clone_regs_parent(env, flags);
6625         new_cpu = env_cpu(new_env);
6626         new_cpu->opaque = ts;
6627         ts->bprm = parent_ts->bprm;
6628         ts->info = parent_ts->info;
6629         ts->signal_mask = parent_ts->signal_mask;
6630 
6631         if (flags & CLONE_CHILD_CLEARTID) {
6632             ts->child_tidptr = child_tidptr;
6633         }
6634 
6635         if (flags & CLONE_SETTLS) {
6636             cpu_set_tls (new_env, newtls);
6637         }
6638 
6639         memset(&info, 0, sizeof(info));
6640         pthread_mutex_init(&info.mutex, NULL);
6641         pthread_mutex_lock(&info.mutex);
6642         pthread_cond_init(&info.cond, NULL);
6643         info.env = new_env;
6644         if (flags & CLONE_CHILD_SETTID) {
6645             info.child_tidptr = child_tidptr;
6646         }
6647         if (flags & CLONE_PARENT_SETTID) {
6648             info.parent_tidptr = parent_tidptr;
6649         }
6650 
6651         ret = pthread_attr_init(&attr);
6652         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6653         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6654         /* It is not safe to deliver signals until the child has finished
6655            initializing, so temporarily block all signals.  */
6656         sigfillset(&sigmask);
6657         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6658         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6659 
6660         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6661         /* TODO: Free new CPU state if thread creation failed.  */
6662 
6663         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6664         pthread_attr_destroy(&attr);
6665         if (ret == 0) {
6666             /* Wait for the child to initialize.  */
6667             pthread_cond_wait(&info.cond, &info.mutex);
6668             ret = info.tid;
6669         } else {
6670             ret = -1;
6671         }
6672         pthread_mutex_unlock(&info.mutex);
6673         pthread_cond_destroy(&info.cond);
6674         pthread_mutex_destroy(&info.mutex);
6675         pthread_mutex_unlock(&clone_lock);
6676     } else {
6677         /* if CLONE_VM is not set, we consider it a fork */
6678         if (flags & CLONE_INVALID_FORK_FLAGS) {
6679             return -TARGET_EINVAL;
6680         }
6681 
6682         /* We can't support custom termination signals */
6683         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6684             return -TARGET_EINVAL;
6685         }
6686 
6687         if (block_signals()) {
6688             return -QEMU_ERESTARTSYS;
6689         }
6690 
6691         fork_start();
6692         ret = fork();
6693         if (ret == 0) {
6694             /* Child Process.  */
6695             cpu_clone_regs_child(env, newsp, flags);
6696             fork_end(1);
6697             /* There is a race condition here.  The parent process could
6698                theoretically read the TID in the child process before the
6699                child's tid is set.  Fixing this would require either ptrace
6700                (not implemented) or having *_tidptr point at a shared memory
6701                mapping.  We can't repeat the spinlock hack used above because
6702                the child process gets its own copy of the lock.  */
6703             if (flags & CLONE_CHILD_SETTID)
6704                 put_user_u32(sys_gettid(), child_tidptr);
6705             if (flags & CLONE_PARENT_SETTID)
6706                 put_user_u32(sys_gettid(), parent_tidptr);
6707             ts = (TaskState *)cpu->opaque;
6708             if (flags & CLONE_SETTLS)
6709                 cpu_set_tls (env, newtls);
6710             if (flags & CLONE_CHILD_CLEARTID)
6711                 ts->child_tidptr = child_tidptr;
6712         } else {
6713             cpu_clone_regs_parent(env, flags);
6714             fork_end(0);
6715         }
6716     }
6717     return ret;
6718 }
6719 
6720 /* warning: does not handle Linux-specific flags... */
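/*
 * The guest's F_GETLK/F_SETLK/F_SETLKW are mapped to the host's 64-bit
 * commands so that do_fcntl() below can always work with struct flock64
 * internally, whatever the width of the guest's off_t.
 */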
6721 static int target_to_host_fcntl_cmd(int cmd)
6722 {
6723     int ret;
6724 
6725     switch(cmd) {
6726     case TARGET_F_DUPFD:
6727     case TARGET_F_GETFD:
6728     case TARGET_F_SETFD:
6729     case TARGET_F_GETFL:
6730     case TARGET_F_SETFL:
6731     case TARGET_F_OFD_GETLK:
6732     case TARGET_F_OFD_SETLK:
6733     case TARGET_F_OFD_SETLKW:
6734         ret = cmd;
6735         break;
6736     case TARGET_F_GETLK:
6737         ret = F_GETLK64;
6738         break;
6739     case TARGET_F_SETLK:
6740         ret = F_SETLK64;
6741         break;
6742     case TARGET_F_SETLKW:
6743         ret = F_SETLKW64;
6744         break;
6745     case TARGET_F_GETOWN:
6746         ret = F_GETOWN;
6747         break;
6748     case TARGET_F_SETOWN:
6749         ret = F_SETOWN;
6750         break;
6751     case TARGET_F_GETSIG:
6752         ret = F_GETSIG;
6753         break;
6754     case TARGET_F_SETSIG:
6755         ret = F_SETSIG;
6756         break;
6757 #if TARGET_ABI_BITS == 32
6758     case TARGET_F_GETLK64:
6759         ret = F_GETLK64;
6760         break;
6761     case TARGET_F_SETLK64:
6762         ret = F_SETLK64;
6763         break;
6764     case TARGET_F_SETLKW64:
6765         ret = F_SETLKW64;
6766         break;
6767 #endif
6768     case TARGET_F_SETLEASE:
6769         ret = F_SETLEASE;
6770         break;
6771     case TARGET_F_GETLEASE:
6772         ret = F_GETLEASE;
6773         break;
6774 #ifdef F_DUPFD_CLOEXEC
6775     case TARGET_F_DUPFD_CLOEXEC:
6776         ret = F_DUPFD_CLOEXEC;
6777         break;
6778 #endif
6779     case TARGET_F_NOTIFY:
6780         ret = F_NOTIFY;
6781         break;
6782 #ifdef F_GETOWN_EX
6783     case TARGET_F_GETOWN_EX:
6784         ret = F_GETOWN_EX;
6785         break;
6786 #endif
6787 #ifdef F_SETOWN_EX
6788     case TARGET_F_SETOWN_EX:
6789         ret = F_SETOWN_EX;
6790         break;
6791 #endif
6792 #ifdef F_SETPIPE_SZ
6793     case TARGET_F_SETPIPE_SZ:
6794         ret = F_SETPIPE_SZ;
6795         break;
6796     case TARGET_F_GETPIPE_SZ:
6797         ret = F_GETPIPE_SZ;
6798         break;
6799 #endif
6800 #ifdef F_ADD_SEALS
6801     case TARGET_F_ADD_SEALS:
6802         ret = F_ADD_SEALS;
6803         break;
6804     case TARGET_F_GET_SEALS:
6805         ret = F_GET_SEALS;
6806         break;
6807 #endif
6808     default:
6809         ret = -TARGET_EINVAL;
6810         break;
6811     }
6812 
6813 #if defined(__powerpc64__)
6814     /* On PPC64, the glibc headers define the F_*LK* commands as 12, 13 and
6815      * 14, which the kernel does not support. The glibc fcntl() call adjusts
6816      * them to 5, 6 and 7 before making the syscall. Since we make the
6817      * syscall directly, adjust to what the kernel supports.
6818      */
6819     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6820         ret -= F_GETLK64 - 5;
6821     }
6822 #endif
6823 
6824     return ret;
6825 }
6826 
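/*
 * Translation table for flock lock types, written X-macro style: the table
 * expands to a switch on 'type', and each user redefines TRANSTBL_CONVERT so
 * the same list maps TARGET_F_* to F_* in one direction and back in the
 * other.  For example, target_to_host_flock() expands the F_RDLCK entry to
 * "case TARGET_F_RDLCK: return F_RDLCK;".
 */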
6827 #define FLOCK_TRANSTBL \
6828     switch (type) { \
6829     TRANSTBL_CONVERT(F_RDLCK); \
6830     TRANSTBL_CONVERT(F_WRLCK); \
6831     TRANSTBL_CONVERT(F_UNLCK); \
6832     }
6833 
6834 static int target_to_host_flock(int type)
6835 {
6836 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6837     FLOCK_TRANSTBL
6838 #undef  TRANSTBL_CONVERT
6839     return -TARGET_EINVAL;
6840 }
6841 
6842 static int host_to_target_flock(int type)
6843 {
6844 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6845     FLOCK_TRANSTBL
6846 #undef  TRANSTBL_CONVERT
6847     /* If we don't know how to convert the value coming from
6848      * the host, we copy it to the target field as-is.
6849      */
6850     return type;
6851 }
6852 
6853 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6854                                             abi_ulong target_flock_addr)
6855 {
6856     struct target_flock *target_fl;
6857     int l_type;
6858 
6859     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6860         return -TARGET_EFAULT;
6861     }
6862 
6863     __get_user(l_type, &target_fl->l_type);
6864     l_type = target_to_host_flock(l_type);
6865     if (l_type < 0) {
6866         return l_type;
6867     }
6868     fl->l_type = l_type;
6869     __get_user(fl->l_whence, &target_fl->l_whence);
6870     __get_user(fl->l_start, &target_fl->l_start);
6871     __get_user(fl->l_len, &target_fl->l_len);
6872     __get_user(fl->l_pid, &target_fl->l_pid);
6873     unlock_user_struct(target_fl, target_flock_addr, 0);
6874     return 0;
6875 }
6876 
6877 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6878                                           const struct flock64 *fl)
6879 {
6880     struct target_flock *target_fl;
6881     short l_type;
6882 
6883     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6884         return -TARGET_EFAULT;
6885     }
6886 
6887     l_type = host_to_target_flock(fl->l_type);
6888     __put_user(l_type, &target_fl->l_type);
6889     __put_user(fl->l_whence, &target_fl->l_whence);
6890     __put_user(fl->l_start, &target_fl->l_start);
6891     __put_user(fl->l_len, &target_fl->l_len);
6892     __put_user(fl->l_pid, &target_fl->l_pid);
6893     unlock_user_struct(target_fl, target_flock_addr, 1);
6894     return 0;
6895 }
6896 
6897 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6898 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6899 
6900 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6901 struct target_oabi_flock64 {
6902     abi_short l_type;
6903     abi_short l_whence;
6904     abi_llong l_start;
6905     abi_llong l_len;
6906     abi_int   l_pid;
6907 } QEMU_PACKED;
6908 
6909 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6910                                                    abi_ulong target_flock_addr)
6911 {
6912     struct target_oabi_flock64 *target_fl;
6913     int l_type;
6914 
6915     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6916         return -TARGET_EFAULT;
6917     }
6918 
6919     __get_user(l_type, &target_fl->l_type);
6920     l_type = target_to_host_flock(l_type);
6921     if (l_type < 0) {
6922         return l_type;
6923     }
6924     fl->l_type = l_type;
6925     __get_user(fl->l_whence, &target_fl->l_whence);
6926     __get_user(fl->l_start, &target_fl->l_start);
6927     __get_user(fl->l_len, &target_fl->l_len);
6928     __get_user(fl->l_pid, &target_fl->l_pid);
6929     unlock_user_struct(target_fl, target_flock_addr, 0);
6930     return 0;
6931 }
6932 
6933 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6934                                                  const struct flock64 *fl)
6935 {
6936     struct target_oabi_flock64 *target_fl;
6937     short l_type;
6938 
6939     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6940         return -TARGET_EFAULT;
6941     }
6942 
6943     l_type = host_to_target_flock(fl->l_type);
6944     __put_user(l_type, &target_fl->l_type);
6945     __put_user(fl->l_whence, &target_fl->l_whence);
6946     __put_user(fl->l_start, &target_fl->l_start);
6947     __put_user(fl->l_len, &target_fl->l_len);
6948     __put_user(fl->l_pid, &target_fl->l_pid);
6949     unlock_user_struct(target_fl, target_flock_addr, 1);
6950     return 0;
6951 }
6952 #endif
6953 
6954 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
6955                                               abi_ulong target_flock_addr)
6956 {
6957     struct target_flock64 *target_fl;
6958     int l_type;
6959 
6960     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6961         return -TARGET_EFAULT;
6962     }
6963 
6964     __get_user(l_type, &target_fl->l_type);
6965     l_type = target_to_host_flock(l_type);
6966     if (l_type < 0) {
6967         return l_type;
6968     }
6969     fl->l_type = l_type;
6970     __get_user(fl->l_whence, &target_fl->l_whence);
6971     __get_user(fl->l_start, &target_fl->l_start);
6972     __get_user(fl->l_len, &target_fl->l_len);
6973     __get_user(fl->l_pid, &target_fl->l_pid);
6974     unlock_user_struct(target_fl, target_flock_addr, 0);
6975     return 0;
6976 }
6977 
6978 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6979                                             const struct flock64 *fl)
6980 {
6981     struct target_flock64 *target_fl;
6982     short l_type;
6983 
6984     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6985         return -TARGET_EFAULT;
6986     }
6987 
6988     l_type = host_to_target_flock(fl->l_type);
6989     __put_user(l_type, &target_fl->l_type);
6990     __put_user(fl->l_whence, &target_fl->l_whence);
6991     __put_user(fl->l_start, &target_fl->l_start);
6992     __put_user(fl->l_len, &target_fl->l_len);
6993     __put_user(fl->l_pid, &target_fl->l_pid);
6994     unlock_user_struct(target_fl, target_flock_addr, 1);
6995     return 0;
6996 }
6997 
6998 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
6999 {
7000     struct flock64 fl64;
7001 #ifdef F_GETOWN_EX
7002     struct f_owner_ex fox;
7003     struct target_f_owner_ex *target_fox;
7004 #endif
7005     abi_long ret;
7006     int host_cmd = target_to_host_fcntl_cmd(cmd);
7007 
7008     if (host_cmd == -TARGET_EINVAL)
7009         return host_cmd;
7010 
7011     switch(cmd) {
7012     case TARGET_F_GETLK:
7013         ret = copy_from_user_flock(&fl64, arg);
7014         if (ret) {
7015             return ret;
7016         }
7017         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7018         if (ret == 0) {
7019             ret = copy_to_user_flock(arg, &fl64);
7020         }
7021         break;
7022 
7023     case TARGET_F_SETLK:
7024     case TARGET_F_SETLKW:
7025         ret = copy_from_user_flock(&fl64, arg);
7026         if (ret) {
7027             return ret;
7028         }
7029         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7030         break;
7031 
7032     case TARGET_F_GETLK64:
7033     case TARGET_F_OFD_GETLK:
7034         ret = copy_from_user_flock64(&fl64, arg);
7035         if (ret) {
7036             return ret;
7037         }
7038         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7039         if (ret == 0) {
7040             ret = copy_to_user_flock64(arg, &fl64);
7041         }
7042         break;
7043     case TARGET_F_SETLK64:
7044     case TARGET_F_SETLKW64:
7045     case TARGET_F_OFD_SETLK:
7046     case TARGET_F_OFD_SETLKW:
7047         ret = copy_from_user_flock64(&fl64, arg);
7048         if (ret) {
7049             return ret;
7050         }
7051         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7052         break;
7053 
7054     case TARGET_F_GETFL:
7055         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7056         if (ret >= 0) {
7057             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7058         }
7059         break;
7060 
7061     case TARGET_F_SETFL:
7062         ret = get_errno(safe_fcntl(fd, host_cmd,
7063                                    target_to_host_bitmask(arg,
7064                                                           fcntl_flags_tbl)));
7065         break;
7066 
7067 #ifdef F_GETOWN_EX
7068     case TARGET_F_GETOWN_EX:
7069         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7070         if (ret >= 0) {
7071             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7072                 return -TARGET_EFAULT;
7073             target_fox->type = tswap32(fox.type);
7074             target_fox->pid = tswap32(fox.pid);
7075             unlock_user_struct(target_fox, arg, 1);
7076         }
7077         break;
7078 #endif
7079 
7080 #ifdef F_SETOWN_EX
7081     case TARGET_F_SETOWN_EX:
7082         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7083             return -TARGET_EFAULT;
7084         fox.type = tswap32(target_fox->type);
7085         fox.pid = tswap32(target_fox->pid);
7086         unlock_user_struct(target_fox, arg, 0);
7087         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7088         break;
7089 #endif
7090 
7091     case TARGET_F_SETSIG:
7092         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7093         break;
7094 
7095     case TARGET_F_GETSIG:
7096         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7097         break;
7098 
7099     case TARGET_F_SETOWN:
7100     case TARGET_F_GETOWN:
7101     case TARGET_F_SETLEASE:
7102     case TARGET_F_GETLEASE:
7103     case TARGET_F_SETPIPE_SZ:
7104     case TARGET_F_GETPIPE_SZ:
7105     case TARGET_F_ADD_SEALS:
7106     case TARGET_F_GET_SEALS:
7107         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7108         break;
7109 
7110     default:
7111         ret = get_errno(safe_fcntl(fd, cmd, arg));
7112         break;
7113     }
7114     return ret;
7115 }
7116 
7117 #ifdef USE_UID16
7118 
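/*
 * Helpers for targets whose legacy syscalls carry 16-bit uids/gids.  IDs that
 * do not fit are clamped to 65534 (the kernel's default overflowuid/
 * overflowgid) when narrowing, while the special value -1 (meaning "leave
 * unchanged" for calls such as setresuid()) must be preserved when widening,
 * hence the int16_t cast.
 */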
7119 static inline int high2lowuid(int uid)
7120 {
7121     if (uid > 65535)
7122         return 65534;
7123     else
7124         return uid;
7125 }
7126 
7127 static inline int high2lowgid(int gid)
7128 {
7129     if (gid > 65535)
7130         return 65534;
7131     else
7132         return gid;
7133 }
7134 
7135 static inline int low2highuid(int uid)
7136 {
7137     if ((int16_t)uid == -1)
7138         return -1;
7139     else
7140         return uid;
7141 }
7142 
7143 static inline int low2highgid(int gid)
7144 {
7145     if ((int16_t)gid == -1)
7146         return -1;
7147     else
7148         return gid;
7149 }
7150 static inline int tswapid(int id)
7151 {
7152     return tswap16(id);
7153 }
7154 
7155 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7156 
7157 #else /* !USE_UID16 */
7158 static inline int high2lowuid(int uid)
7159 {
7160     return uid;
7161 }
7162 static inline int high2lowgid(int gid)
7163 {
7164     return gid;
7165 }
7166 static inline int low2highuid(int uid)
7167 {
7168     return uid;
7169 }
7170 static inline int low2highgid(int gid)
7171 {
7172     return gid;
7173 }
7174 static inline int tswapid(int id)
7175 {
7176     return tswap32(id);
7177 }
7178 
7179 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7180 
7181 #endif /* USE_UID16 */
7182 
7183 /* We must do direct syscalls for setting UID/GID, because we want to
7184  * implement the Linux system call semantics of "change only for this thread",
7185  * not the libc/POSIX semantics of "change for all threads in process".
7186  * (See http://ewontfix.com/17/ for more details.)
7187  * We use the 32-bit version of the syscalls if present; if it is not
7188  * then either the host architecture supports 32-bit UIDs natively with
7189  * the standard syscall, or the 16-bit UID is the best we can do.
7190  */
7191 #ifdef __NR_setuid32
7192 #define __NR_sys_setuid __NR_setuid32
7193 #else
7194 #define __NR_sys_setuid __NR_setuid
7195 #endif
7196 #ifdef __NR_setgid32
7197 #define __NR_sys_setgid __NR_setgid32
7198 #else
7199 #define __NR_sys_setgid __NR_setgid
7200 #endif
7201 #ifdef __NR_setresuid32
7202 #define __NR_sys_setresuid __NR_setresuid32
7203 #else
7204 #define __NR_sys_setresuid __NR_setresuid
7205 #endif
7206 #ifdef __NR_setresgid32
7207 #define __NR_sys_setresgid __NR_setresgid32
7208 #else
7209 #define __NR_sys_setresgid __NR_setresgid
7210 #endif
7211 
7212 _syscall1(int, sys_setuid, uid_t, uid)
7213 _syscall1(int, sys_setgid, gid_t, gid)
7214 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7215 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7216 
7217 void syscall_init(void)
7218 {
7219     IOCTLEntry *ie;
7220     const argtype *arg_type;
7221     int size;
7222 
7223     thunk_init(STRUCT_MAX);
7224 
7225 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7226 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7227 #include "syscall_types.h"
7228 #undef STRUCT
7229 #undef STRUCT_SPECIAL
7230 
7231     /* We patch the ioctl size if necessary. We rely on the fact that
7232        no ioctl has all bits set to '1' in its size field. */
7233     ie = ioctl_entries;
7234     while (ie->target_cmd != 0) {
7235         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7236             TARGET_IOC_SIZEMASK) {
7237             arg_type = ie->arg_type;
7238             if (arg_type[0] != TYPE_PTR) {
7239                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7240                         ie->target_cmd);
7241                 exit(1);
7242             }
7243             arg_type++;
7244             size = thunk_type_size(arg_type, 0);
7245             ie->target_cmd = (ie->target_cmd &
7246                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7247                 (size << TARGET_IOC_SIZESHIFT);
7248         }
7249 
7250         /* automatic consistency check if same arch */
7251 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7252     (defined(__x86_64__) && defined(TARGET_X86_64))
7253         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7254             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7255                     ie->name, ie->target_cmd, ie->host_cmd);
7256         }
7257 #endif
7258         ie++;
7259     }
7260 }
7261 
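/*
 * On some 32-bit ABIs a 64-bit syscall argument must be passed in an even/odd
 * register pair, which inserts a padding register before it.
 * regpairs_aligned() detects this, the halves are shifted down accordingly,
 * and target_offset64() reassembles them into the host 64-bit offset.
 */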
7262 #ifdef TARGET_NR_truncate64
7263 static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
7264                                          abi_long arg2,
7265                                          abi_long arg3,
7266                                          abi_long arg4)
7267 {
7268     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7269         arg2 = arg3;
7270         arg3 = arg4;
7271     }
7272     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7273 }
7274 #endif
7275 
7276 #ifdef TARGET_NR_ftruncate64
7277 static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
7278                                           abi_long arg2,
7279                                           abi_long arg3,
7280                                           abi_long arg4)
7281 {
7282     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7283         arg2 = arg3;
7284         arg3 = arg4;
7285     }
7286     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7287 }
7288 #endif
7289 
7290 #if defined(TARGET_NR_timer_settime) || \
7291     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7292 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7293                                                  abi_ulong target_addr)
7294 {
7295     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7296                                 offsetof(struct target_itimerspec,
7297                                          it_interval)) ||
7298         target_to_host_timespec(&host_its->it_value, target_addr +
7299                                 offsetof(struct target_itimerspec,
7300                                          it_value))) {
7301         return -TARGET_EFAULT;
7302     }
7303 
7304     return 0;
7305 }
7306 #endif
7307 
7308 #if defined(TARGET_NR_timer_settime64) || \
7309     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7310 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7311                                                    abi_ulong target_addr)
7312 {
7313     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7314                                   offsetof(struct target__kernel_itimerspec,
7315                                            it_interval)) ||
7316         target_to_host_timespec64(&host_its->it_value, target_addr +
7317                                   offsetof(struct target__kernel_itimerspec,
7318                                            it_value))) {
7319         return -TARGET_EFAULT;
7320     }
7321 
7322     return 0;
7323 }
7324 #endif
7325 
7326 #if ((defined(TARGET_NR_timerfd_gettime) || \
7327       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7328       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7329 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7330                                                  struct itimerspec *host_its)
7331 {
7332     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7333                                                        it_interval),
7334                                 &host_its->it_interval) ||
7335         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7336                                                        it_value),
7337                                 &host_its->it_value)) {
7338         return -TARGET_EFAULT;
7339     }
7340     return 0;
7341 }
7342 #endif
7343 
7344 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7345       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7346       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7347 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7348                                                    struct itimerspec *host_its)
7349 {
7350     if (host_to_target_timespec64(target_addr +
7351                                   offsetof(struct target__kernel_itimerspec,
7352                                            it_interval),
7353                                   &host_its->it_interval) ||
7354         host_to_target_timespec64(target_addr +
7355                                   offsetof(struct target__kernel_itimerspec,
7356                                            it_value),
7357                                   &host_its->it_value)) {
7358         return -TARGET_EFAULT;
7359     }
7360     return 0;
7361 }
7362 #endif
7363 
7364 #if defined(TARGET_NR_adjtimex) || \
7365     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7366 static inline abi_long target_to_host_timex(struct timex *host_tx,
7367                                             abi_long target_addr)
7368 {
7369     struct target_timex *target_tx;
7370 
7371     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7372         return -TARGET_EFAULT;
7373     }
7374 
7375     __get_user(host_tx->modes, &target_tx->modes);
7376     __get_user(host_tx->offset, &target_tx->offset);
7377     __get_user(host_tx->freq, &target_tx->freq);
7378     __get_user(host_tx->maxerror, &target_tx->maxerror);
7379     __get_user(host_tx->esterror, &target_tx->esterror);
7380     __get_user(host_tx->status, &target_tx->status);
7381     __get_user(host_tx->constant, &target_tx->constant);
7382     __get_user(host_tx->precision, &target_tx->precision);
7383     __get_user(host_tx->tolerance, &target_tx->tolerance);
7384     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7385     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7386     __get_user(host_tx->tick, &target_tx->tick);
7387     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7388     __get_user(host_tx->jitter, &target_tx->jitter);
7389     __get_user(host_tx->shift, &target_tx->shift);
7390     __get_user(host_tx->stabil, &target_tx->stabil);
7391     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7392     __get_user(host_tx->calcnt, &target_tx->calcnt);
7393     __get_user(host_tx->errcnt, &target_tx->errcnt);
7394     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7395     __get_user(host_tx->tai, &target_tx->tai);
7396 
7397     unlock_user_struct(target_tx, target_addr, 0);
7398     return 0;
7399 }
7400 
7401 static inline abi_long host_to_target_timex(abi_long target_addr,
7402                                             struct timex *host_tx)
7403 {
7404     struct target_timex *target_tx;
7405 
7406     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7407         return -TARGET_EFAULT;
7408     }
7409 
7410     __put_user(host_tx->modes, &target_tx->modes);
7411     __put_user(host_tx->offset, &target_tx->offset);
7412     __put_user(host_tx->freq, &target_tx->freq);
7413     __put_user(host_tx->maxerror, &target_tx->maxerror);
7414     __put_user(host_tx->esterror, &target_tx->esterror);
7415     __put_user(host_tx->status, &target_tx->status);
7416     __put_user(host_tx->constant, &target_tx->constant);
7417     __put_user(host_tx->precision, &target_tx->precision);
7418     __put_user(host_tx->tolerance, &target_tx->tolerance);
7419     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7420     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7421     __put_user(host_tx->tick, &target_tx->tick);
7422     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7423     __put_user(host_tx->jitter, &target_tx->jitter);
7424     __put_user(host_tx->shift, &target_tx->shift);
7425     __put_user(host_tx->stabil, &target_tx->stabil);
7426     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7427     __put_user(host_tx->calcnt, &target_tx->calcnt);
7428     __put_user(host_tx->errcnt, &target_tx->errcnt);
7429     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7430     __put_user(host_tx->tai, &target_tx->tai);
7431 
7432     unlock_user_struct(target_tx, target_addr, 1);
7433     return 0;
7434 }
7435 #endif
7436 
7437 
7438 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7439 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7440                                               abi_long target_addr)
7441 {
7442     struct target__kernel_timex *target_tx;
7443 
7444     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7445                                  offsetof(struct target__kernel_timex,
7446                                           time))) {
7447         return -TARGET_EFAULT;
7448     }
7449 
7450     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7451         return -TARGET_EFAULT;
7452     }
7453 
7454     __get_user(host_tx->modes, &target_tx->modes);
7455     __get_user(host_tx->offset, &target_tx->offset);
7456     __get_user(host_tx->freq, &target_tx->freq);
7457     __get_user(host_tx->maxerror, &target_tx->maxerror);
7458     __get_user(host_tx->esterror, &target_tx->esterror);
7459     __get_user(host_tx->status, &target_tx->status);
7460     __get_user(host_tx->constant, &target_tx->constant);
7461     __get_user(host_tx->precision, &target_tx->precision);
7462     __get_user(host_tx->tolerance, &target_tx->tolerance);
7463     __get_user(host_tx->tick, &target_tx->tick);
7464     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7465     __get_user(host_tx->jitter, &target_tx->jitter);
7466     __get_user(host_tx->shift, &target_tx->shift);
7467     __get_user(host_tx->stabil, &target_tx->stabil);
7468     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7469     __get_user(host_tx->calcnt, &target_tx->calcnt);
7470     __get_user(host_tx->errcnt, &target_tx->errcnt);
7471     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7472     __get_user(host_tx->tai, &target_tx->tai);
7473 
7474     unlock_user_struct(target_tx, target_addr, 0);
7475     return 0;
7476 }
7477 
7478 static inline abi_long host_to_target_timex64(abi_long target_addr,
7479                                               struct timex *host_tx)
7480 {
7481     struct target__kernel_timex *target_tx;
7482 
7483    if (copy_to_user_timeval64(target_addr +
7484                               offsetof(struct target__kernel_timex, time),
7485                               &host_tx->time)) {
7486         return -TARGET_EFAULT;
7487     }
7488 
7489     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7490         return -TARGET_EFAULT;
7491     }
7492 
7493     __put_user(host_tx->modes, &target_tx->modes);
7494     __put_user(host_tx->offset, &target_tx->offset);
7495     __put_user(host_tx->freq, &target_tx->freq);
7496     __put_user(host_tx->maxerror, &target_tx->maxerror);
7497     __put_user(host_tx->esterror, &target_tx->esterror);
7498     __put_user(host_tx->status, &target_tx->status);
7499     __put_user(host_tx->constant, &target_tx->constant);
7500     __put_user(host_tx->precision, &target_tx->precision);
7501     __put_user(host_tx->tolerance, &target_tx->tolerance);
7502     __put_user(host_tx->tick, &target_tx->tick);
7503     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7504     __put_user(host_tx->jitter, &target_tx->jitter);
7505     __put_user(host_tx->shift, &target_tx->shift);
7506     __put_user(host_tx->stabil, &target_tx->stabil);
7507     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7508     __put_user(host_tx->calcnt, &target_tx->calcnt);
7509     __put_user(host_tx->errcnt, &target_tx->errcnt);
7510     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7511     __put_user(host_tx->tai, &target_tx->tai);
7512 
7513     unlock_user_struct(target_tx, target_addr, 1);
7514     return 0;
7515 }
7516 #endif
7517 
7518 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7519 #define sigev_notify_thread_id _sigev_un._tid
7520 #endif
7521 
7522 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7523                                                abi_ulong target_addr)
7524 {
7525     struct target_sigevent *target_sevp;
7526 
7527     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7528         return -TARGET_EFAULT;
7529     }
7530 
7531     /* This union is awkward on 64 bit systems because it has a 32 bit
7532      * integer and a pointer in it; we follow the conversion approach
7533      * used for handling sigval types in signal.c so the guest should get
7534      * the correct value back even if we did a 64 bit byteswap and it's
7535      * using the 32 bit integer.
7536      */
7537     host_sevp->sigev_value.sival_ptr =
7538         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7539     host_sevp->sigev_signo =
7540         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7541     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7542     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7543 
7544     unlock_user_struct(target_sevp, target_addr, 1);
7545     return 0;
7546 }
7547 
7548 #if defined(TARGET_NR_mlockall)
7549 static inline int target_to_host_mlockall_arg(int arg)
7550 {
7551     int result = 0;
7552 
7553     if (arg & TARGET_MCL_CURRENT) {
7554         result |= MCL_CURRENT;
7555     }
7556     if (arg & TARGET_MCL_FUTURE) {
7557         result |= MCL_FUTURE;
7558     }
7559 #ifdef MCL_ONFAULT
7560     if (arg & TARGET_MCL_ONFAULT) {
7561         result |= MCL_ONFAULT;
7562     }
7563 #endif
7564 
7565     return result;
7566 }
7567 #endif
7568 
7569 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7570      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7571      defined(TARGET_NR_newfstatat))
7572 static inline abi_long host_to_target_stat64(void *cpu_env,
7573                                              abi_ulong target_addr,
7574                                              struct stat *host_st)
7575 {
7576 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7577     if (((CPUARMState *)cpu_env)->eabi) {
7578         struct target_eabi_stat64 *target_st;
7579 
7580         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7581             return -TARGET_EFAULT;
7582         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7583         __put_user(host_st->st_dev, &target_st->st_dev);
7584         __put_user(host_st->st_ino, &target_st->st_ino);
7585 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7586         __put_user(host_st->st_ino, &target_st->__st_ino);
7587 #endif
7588         __put_user(host_st->st_mode, &target_st->st_mode);
7589         __put_user(host_st->st_nlink, &target_st->st_nlink);
7590         __put_user(host_st->st_uid, &target_st->st_uid);
7591         __put_user(host_st->st_gid, &target_st->st_gid);
7592         __put_user(host_st->st_rdev, &target_st->st_rdev);
7593         __put_user(host_st->st_size, &target_st->st_size);
7594         __put_user(host_st->st_blksize, &target_st->st_blksize);
7595         __put_user(host_st->st_blocks, &target_st->st_blocks);
7596         __put_user(host_st->st_atime, &target_st->target_st_atime);
7597         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7598         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7599 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7600         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7601         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7602         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7603 #endif
7604         unlock_user_struct(target_st, target_addr, 1);
7605     } else
7606 #endif
7607     {
7608 #if defined(TARGET_HAS_STRUCT_STAT64)
7609         struct target_stat64 *target_st;
7610 #else
7611         struct target_stat *target_st;
7612 #endif
7613 
7614         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7615             return -TARGET_EFAULT;
7616         memset(target_st, 0, sizeof(*target_st));
7617         __put_user(host_st->st_dev, &target_st->st_dev);
7618         __put_user(host_st->st_ino, &target_st->st_ino);
7619 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7620         __put_user(host_st->st_ino, &target_st->__st_ino);
7621 #endif
7622         __put_user(host_st->st_mode, &target_st->st_mode);
7623         __put_user(host_st->st_nlink, &target_st->st_nlink);
7624         __put_user(host_st->st_uid, &target_st->st_uid);
7625         __put_user(host_st->st_gid, &target_st->st_gid);
7626         __put_user(host_st->st_rdev, &target_st->st_rdev);
7627         /* XXX: better use of kernel struct */
7628         __put_user(host_st->st_size, &target_st->st_size);
7629         __put_user(host_st->st_blksize, &target_st->st_blksize);
7630         __put_user(host_st->st_blocks, &target_st->st_blocks);
7631         __put_user(host_st->st_atime, &target_st->target_st_atime);
7632         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7633         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7634 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7635         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7636         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7637         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7638 #endif
7639         unlock_user_struct(target_st, target_addr, 1);
7640     }
7641 
7642     return 0;
7643 }
7644 #endif
7645 
7646 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7647 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7648                                             abi_ulong target_addr)
7649 {
7650     struct target_statx *target_stx;
7651 
7652     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7653         return -TARGET_EFAULT;
7654     }
7655     memset(target_stx, 0, sizeof(*target_stx));
7656 
7657     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7658     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7659     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7660     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7661     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7662     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7663     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7664     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7665     __put_user(host_stx->stx_size, &target_stx->stx_size);
7666     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7667     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7668     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7669     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7670     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7671     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7672     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7673     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7674     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7675     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7676     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7677     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7678     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7679     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7680 
7681     unlock_user_struct(target_stx, target_addr, 1);
7682 
7683     return 0;
7684 }
7685 #endif
7686 
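/*
 * Raw futex call: on 64-bit hosts this is always __NR_futex; on 32-bit
 * hosts prefer __NR_futex_time64 when the host timespec has a 64-bit
 * tv_sec, falling back to the old __NR_futex otherwise.  The result is
 * returned unconverted (no get_errno()).
 */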
7687 static int do_sys_futex(int *uaddr, int op, int val,
7688                          const struct timespec *timeout, int *uaddr2,
7689                          int val3)
7690 {
7691 #if HOST_LONG_BITS == 64
7692 #if defined(__NR_futex)
7693     /* 64-bit hosts have a 64-bit time_t and define no _time64 variant. */
7694     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7695 
7696 #endif
7697 #else /* HOST_LONG_BITS == 64 */
7698 #if defined(__NR_futex_time64)
7699     if (sizeof(timeout->tv_sec) == 8) {
7700         /* _time64 function on 32bit arch */
7701         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7702     }
7703 #endif
7704 #if defined(__NR_futex)
7705     /* old function on 32bit arch */
7706     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7707 #endif
7708 #endif /* HOST_LONG_BITS == 64 */
7709     g_assert_not_reached();
7710 }
7711 
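/*
 * Same syscall selection as do_sys_futex(), but issued through the
 * safe_futex()/safe_futex_time64() wrappers and with the result converted
 * via get_errno().
 */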
7712 static int do_safe_futex(int *uaddr, int op, int val,
7713                          const struct timespec *timeout, int *uaddr2,
7714                          int val3)
7715 {
7716 #if HOST_LONG_BITS == 64
7717 #if defined(__NR_futex)
7718     /* 64-bit hosts have a 64-bit time_t and define no _time64 variant. */
7719     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7720 #endif
7721 #else /* HOST_LONG_BITS == 64 */
7722 #if defined(__NR_futex_time64)
7723     if (sizeof(timeout->tv_sec) == 8) {
7724         /* _time64 function on 32bit arch */
7725         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7726                                            val3));
7727     }
7728 #endif
7729 #if defined(__NR_futex)
7730     /* old function on 32bit arch */
7731     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7732 #endif
7733 #endif /* HOST_LONG_BITS == 64 */
7734     return -TARGET_ENOSYS;
7735 }
7736 
7737 /* ??? Using host futex calls even when target atomic operations
7738    are not really atomic probably breaks things.  However, implementing
7739    futexes locally would make it tricky to share futexes between multiple
7740    processes.  In that case they're probably useless anyway, because guest
7741    atomic operations won't work either.  */
7742 #if defined(TARGET_NR_futex)
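/*
 * Handle TARGET_NR_futex: convert the guest timeout for the operations
 * that take one and forward the request to do_safe_futex() using the
 * host address of the guest futex word.
 */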
7743 static int do_futex(CPUState *cpu, target_ulong uaddr, int op, int val,
7744                     target_ulong timeout, target_ulong uaddr2, int val3)
7745 {
7746     struct timespec ts, *pts;
7747     int base_op;
7748 
7749     /* ??? We assume FUTEX_* constants are the same on both host
7750        and target.  */
7751 #ifdef FUTEX_CMD_MASK
7752     base_op = op & FUTEX_CMD_MASK;
7753 #else
7754     base_op = op;
7755 #endif
7756     switch (base_op) {
7757     case FUTEX_WAIT:
7758     case FUTEX_WAIT_BITSET:
7759         if (timeout) {
7760             pts = &ts;
7761             if (target_to_host_timespec(pts, timeout)) {
                     return -TARGET_EFAULT;
                 }
7762         } else {
7763             pts = NULL;
7764         }
7765         return do_safe_futex(g2h(cpu, uaddr),
7766                              op, tswap32(val), pts, NULL, val3);
7767     case FUTEX_WAKE:
7768         return do_safe_futex(g2h(cpu, uaddr),
7769                              op, val, NULL, NULL, 0);
7770     case FUTEX_FD:
7771         return do_safe_futex(g2h(cpu, uaddr),
7772                              op, val, NULL, NULL, 0);
7773     case FUTEX_REQUEUE:
7774     case FUTEX_CMP_REQUEUE:
7775     case FUTEX_WAKE_OP:
7776         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7777            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7778            But the prototype takes a `struct timespec *'; insert casts
7779            to satisfy the compiler.  We do not need to tswap TIMEOUT
7780            since it's not compared to guest memory.  */
7781         pts = (struct timespec *)(uintptr_t) timeout;
7782         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7783                              (base_op == FUTEX_CMP_REQUEUE
7784                               ? tswap32(val3) : val3));
7785     default:
7786         return -TARGET_ENOSYS;
7787     }
7788 }
7789 #endif
7790 
7791 #if defined(TARGET_NR_futex_time64)
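/*
 * As do_futex(), but for TARGET_NR_futex_time64, where the guest timeout
 * is a 64-bit struct timespec.
 */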
7792 static int do_futex_time64(CPUState *cpu, target_ulong uaddr, int op,
7793                            int val, target_ulong timeout,
7794                            target_ulong uaddr2, int val3)
7795 {
7796     struct timespec ts, *pts;
7797     int base_op;
7798 
7799     /* ??? We assume FUTEX_* constants are the same on both host
7800        and target.  */
7801 #ifdef FUTEX_CMD_MASK
7802     base_op = op & FUTEX_CMD_MASK;
7803 #else
7804     base_op = op;
7805 #endif
7806     switch (base_op) {
7807     case FUTEX_WAIT:
7808     case FUTEX_WAIT_BITSET:
7809         if (timeout) {
7810             pts = &ts;
7811             if (target_to_host_timespec64(pts, timeout)) {
7812                 return -TARGET_EFAULT;
7813             }
7814         } else {
7815             pts = NULL;
7816         }
7817         return do_safe_futex(g2h(cpu, uaddr), op,
7818                              tswap32(val), pts, NULL, val3);
7819     case FUTEX_WAKE:
7820         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7821     case FUTEX_FD:
7822         return do_safe_futex(g2h(cpu, uaddr), op, val, NULL, NULL, 0);
7823     case FUTEX_REQUEUE:
7824     case FUTEX_CMP_REQUEUE:
7825     case FUTEX_WAKE_OP:
7826         /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
7827            TIMEOUT parameter is interpreted as a uint32_t by the kernel.
7828            But the prototype takes a `struct timespec *'; insert casts
7829            to satisfy the compiler.  We do not need to tswap TIMEOUT
7830            since it's not compared to guest memory.  */
7831         pts = (struct timespec *)(uintptr_t) timeout;
7832         return do_safe_futex(g2h(cpu, uaddr), op, val, pts, g2h(cpu, uaddr2),
7833                              (base_op == FUTEX_CMP_REQUEUE
7834                               ? tswap32(val3) : val3));
7835     default:
7836         return -TARGET_ENOSYS;
7837     }
7838 }
7839 #endif
7840 
7841 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
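/*
 * Implement name_to_handle_at(): read handle_bytes from the guest
 * file_handle, call the host syscall, then copy the opaque handle back to
 * the guest with handle_bytes/handle_type byte-swapped and store the
 * returned mount id.
 */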
7842 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7843                                      abi_long handle, abi_long mount_id,
7844                                      abi_long flags)
7845 {
7846     struct file_handle *target_fh;
7847     struct file_handle *fh;
7848     int mid = 0;
7849     abi_long ret;
7850     char *name;
7851     unsigned int size, total_size;
7852 
7853     if (get_user_s32(size, handle)) {
7854         return -TARGET_EFAULT;
7855     }
7856 
7857     name = lock_user_string(pathname);
7858     if (!name) {
7859         return -TARGET_EFAULT;
7860     }
7861 
7862     total_size = sizeof(struct file_handle) + size;
7863     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7864     if (!target_fh) {
7865         unlock_user(name, pathname, 0);
7866         return -TARGET_EFAULT;
7867     }
7868 
7869     fh = g_malloc0(total_size);
7870     fh->handle_bytes = size;
7871 
7872     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7873     unlock_user(name, pathname, 0);
7874 
7875     /* man name_to_handle_at(2):
7876      * Other than the use of the handle_bytes field, the caller should treat
7877      * the file_handle structure as an opaque data type
7878      */
7879 
7880     memcpy(target_fh, fh, total_size);
7881     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7882     target_fh->handle_type = tswap32(fh->handle_type);
7883     g_free(fh);
7884     unlock_user(target_fh, handle, total_size);
7885 
7886     if (put_user_s32(mid, mount_id)) {
7887         return -TARGET_EFAULT;
7888     }
7889 
7890     return ret;
7891 
7892 }
7893 #endif
7894 
7895 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
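/*
 * Implement open_by_handle_at(): duplicate the guest file_handle, fix up
 * handle_bytes/handle_type for the host and open it with the open flags
 * translated to host values.
 */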
7896 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7897                                      abi_long flags)
7898 {
7899     struct file_handle *target_fh;
7900     struct file_handle *fh;
7901     unsigned int size, total_size;
7902     abi_long ret;
7903 
7904     if (get_user_s32(size, handle)) {
7905         return -TARGET_EFAULT;
7906     }
7907 
7908     total_size = sizeof(struct file_handle) + size;
7909     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7910     if (!target_fh) {
7911         return -TARGET_EFAULT;
7912     }
7913 
7914     fh = g_memdup(target_fh, total_size);
7915     fh->handle_bytes = size;
7916     fh->handle_type = tswap32(target_fh->handle_type);
7917 
7918     ret = get_errno(open_by_handle_at(mount_fd, fh,
7919                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7920 
7921     g_free(fh);
7922 
7923     unlock_user(target_fh, handle, total_size);
7924 
7925     return ret;
7926 }
7927 #endif
7928 
7929 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7930 
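/*
 * Implement signalfd()/signalfd4(): convert the guest signal mask and
 * flags to host form, create the signalfd, and register a file-descriptor
 * translator so data read from it is converted back to guest format.
 */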
7931 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7932 {
7933     int host_flags;
7934     target_sigset_t *target_mask;
7935     sigset_t host_mask;
7936     abi_long ret;
7937 
7938     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7939         return -TARGET_EINVAL;
7940     }
7941     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7942         return -TARGET_EFAULT;
7943     }
7944 
7945     target_to_host_sigset(&host_mask, target_mask);
7946 
7947     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7948 
7949     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7950     if (ret >= 0) {
7951         fd_trans_register(ret, &target_signalfd_trans);
7952     }
7953 
7954     unlock_user_struct(target_mask, mask, 0);
7955 
7956     return ret;
7957 }
7958 #endif
7959 
7960 /* Map host to target signal numbers for the wait family of syscalls.
7961    Assume all other status bits are the same.  */
7962 int host_to_target_waitstatus(int status)
7963 {
7964     if (WIFSIGNALED(status)) {
7965         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7966     }
7967     if (WIFSTOPPED(status)) {
7968         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7969                | (status & 0xff);
7970     }
7971     return status;
7972 }
7973 
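/*
 * Emulate /proc/self/cmdline: write the guest's saved argv strings,
 * including their terminating NULs, to fd.
 */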
7974 static int open_self_cmdline(void *cpu_env, int fd)
7975 {
7976     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7977     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
7978     int i;
7979 
7980     for (i = 0; i < bprm->argc; i++) {
7981         size_t len = strlen(bprm->argv[i]) + 1;
7982 
7983         if (write(fd, bprm->argv[i], len) != len) {
7984             return -1;
7985         }
7986     }
7987 
7988     return 0;
7989 }
7990 
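/*
 * Emulate /proc/self/maps: walk the host mappings and print the ones that
 * correspond to valid guest addresses, using guest addresses and the
 * guest page protection flags.
 */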
7991 static int open_self_maps(void *cpu_env, int fd)
7992 {
7993     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
7994     TaskState *ts = cpu->opaque;
7995     GSList *map_info = read_self_maps();
7996     GSList *s;
7997     int count;
7998 
7999     for (s = map_info; s; s = g_slist_next(s)) {
8000         MapInfo *e = (MapInfo *) s->data;
8001 
8002         if (h2g_valid(e->start)) {
8003             unsigned long min = e->start;
8004             unsigned long max = e->end;
8005             int flags = page_get_flags(h2g(min));
8006             const char *path;
8007 
8008             max = h2g_valid(max - 1) ?
8009                 max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
8010 
8011             if (page_check_range(h2g(min), max - min, flags) == -1) {
8012                 continue;
8013             }
8014 
8015             if (h2g(min) == ts->info->stack_limit) {
8016                 path = "[stack]";
8017             } else {
8018                 path = e->path;
8019             }
8020 
8021             count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8022                             " %c%c%c%c %08" PRIx64 " %s %"PRId64,
8023                             h2g(min), h2g(max - 1) + 1,
8024                             (flags & PAGE_READ) ? 'r' : '-',
8025                             (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8026                             (flags & PAGE_EXEC) ? 'x' : '-',
8027                             e->is_priv ? 'p' : 's',
8028                             (uint64_t) e->offset, e->dev, e->inode);
8029             if (path) {
8030                 dprintf(fd, "%*s%s\n", 73 - count, "", path);
8031             } else {
8032                 dprintf(fd, "\n");
8033             }
8034         }
8035     }
8036 
8037     free_self_maps(map_info);
8038 
8039 #ifdef TARGET_VSYSCALL_PAGE
8040     /*
8041      * We only support execution from the vsyscall page.
8042      * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
8043      */
8044     count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
8045                     " --xp 00000000 00:00 0",
8046                     TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
8047     dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
8048 #endif
8049 
8050     return 0;
8051 }
8052 
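/*
 * Emulate /proc/self/stat: only the pid, comm, ppid, starttime and
 * start-of-stack fields are filled in; every other field reads as 0.
 */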
8053 static int open_self_stat(void *cpu_env, int fd)
8054 {
8055     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8056     TaskState *ts = cpu->opaque;
8057     g_autoptr(GString) buf = g_string_new(NULL);
8058     int i;
8059 
8060     for (i = 0; i < 44; i++) {
8061         if (i == 0) {
8062             /* pid */
8063             g_string_printf(buf, FMT_pid " ", getpid());
8064         } else if (i == 1) {
8065             /* app name */
8066             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8067             bin = bin ? bin + 1 : ts->bprm->argv[0];
8068             g_string_printf(buf, "(%.15s) ", bin);
8069         } else if (i == 3) {
8070             /* ppid */
8071             g_string_printf(buf, FMT_pid " ", getppid());
8072         } else if (i == 21) {
8073             /* starttime */
8074             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8075         } else if (i == 27) {
8076             /* stack bottom */
8077             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8078         } else {
8079             /* for the rest, there is MasterCard */
8080             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8081         }
8082 
8083         if (write(fd, buf->str, buf->len) != buf->len) {
8084             return -1;
8085         }
8086     }
8087 
8088     return 0;
8089 }
8090 
8091 static int open_self_auxv(void *cpu_env, int fd)
8092 {
8093     CPUState *cpu = env_cpu((CPUArchState *)cpu_env);
8094     TaskState *ts = cpu->opaque;
8095     abi_ulong auxv = ts->info->saved_auxv;
8096     abi_ulong len = ts->info->auxv_len;
8097     char *ptr;
8098 
8099     /*
8100      * The auxiliary vector is stored on the target process stack.
8101      * Read the whole auxv vector and copy it out to the file.
8102      */
8103     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8104     if (ptr != NULL) {
8105         while (len > 0) {
8106             ssize_t r;
8107             r = write(fd, ptr, len);
8108             if (r <= 0) {
8109                 break;
8110             }
8111             len -= r;
8112             ptr += r;
8113         }
8114         lseek(fd, 0, SEEK_SET);
8115         unlock_user(ptr, auxv, len);
8116     }
8117 
8118     return 0;
8119 }
8120 
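/*
 * Return 1 if filename refers to the given /proc entry for the calling
 * process, either as /proc/self/<entry> or /proc/<pid>/<entry> with the
 * current pid; return 0 otherwise.
 */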
8121 static int is_proc_myself(const char *filename, const char *entry)
8122 {
8123     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8124         filename += strlen("/proc/");
8125         if (!strncmp(filename, "self/", strlen("self/"))) {
8126             filename += strlen("self/");
8127         } else if (*filename >= '1' && *filename <= '9') {
8128             char myself[80];
8129             snprintf(myself, sizeof(myself), "%d/", getpid());
8130             if (!strncmp(filename, myself, strlen(myself))) {
8131                 filename += strlen(myself);
8132             } else {
8133                 return 0;
8134             }
8135         } else {
8136             return 0;
8137         }
8138         if (!strcmp(filename, entry)) {
8139             return 1;
8140         }
8141     }
8142     return 0;
8143 }
8144 
8145 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) || \
8146     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA)
8147 static int is_proc(const char *filename, const char *entry)
8148 {
8149     return strcmp(filename, entry) == 0;
8150 }
8151 #endif
8152 
8153 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
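/*
 * Emulate /proc/net/route for guests whose endianness differs from the
 * host's by byte-swapping the address fields of each route entry.
 */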
8154 static int open_net_route(void *cpu_env, int fd)
8155 {
8156     FILE *fp;
8157     char *line = NULL;
8158     size_t len = 0;
8159     ssize_t read;
8160 
8161     fp = fopen("/proc/net/route", "r");
8162     if (fp == NULL) {
8163         return -1;
8164     }
8165 
8166     /* read header */
8167 
8168     read = getline(&line, &len, fp);
8169     dprintf(fd, "%s", line);
8170 
8171     /* read routes */
8172 
8173     while ((read = getline(&line, &len, fp)) != -1) {
8174         char iface[16];
8175         uint32_t dest, gw, mask;
8176         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8177         int fields;
8178 
8179         fields = sscanf(line,
8180                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8181                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8182                         &mask, &mtu, &window, &irtt);
8183         if (fields != 11) {
8184             continue;
8185         }
8186         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8187                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8188                 metric, tswap32(mask), mtu, window, irtt);
8189     }
8190 
8191     free(line);
8192     fclose(fp);
8193 
8194     return 0;
8195 }
8196 #endif
8197 
8198 #if defined(TARGET_SPARC)
8199 static int open_cpuinfo(void *cpu_env, int fd)
8200 {
8201     dprintf(fd, "type\t\t: sun4u\n");
8202     return 0;
8203 }
8204 #endif
8205 
8206 #if defined(TARGET_HPPA)
8207 static int open_cpuinfo(void *cpu_env, int fd)
8208 {
8209     dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8210     dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8211     dprintf(fd, "capabilities\t: os32\n");
8212     dprintf(fd, "model\t\t: 9000/778/B160L\n");
8213     dprintf(fd, "model name\t: Merlin L2 160 QEMU (9000/778/B160L)\n");
8214     return 0;
8215 }
8216 #endif
8217 
8218 #if defined(TARGET_M68K)
8219 static int open_hardware(void *cpu_env, int fd)
8220 {
8221     dprintf(fd, "Model:\t\tqemu-m68k\n");
8222     return 0;
8223 }
8224 #endif
8225 
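/*
 * openat() with special handling for a few /proc paths whose contents
 * must be synthesized for the guest; everything else is passed through
 * to safe_openat().
 */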
8226 static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
8227 {
8228     struct fake_open {
8229         const char *filename;
8230         int (*fill)(void *cpu_env, int fd);
8231         int (*cmp)(const char *s1, const char *s2);
8232     };
8233     const struct fake_open *fake_open;
8234     static const struct fake_open fakes[] = {
8235         { "maps", open_self_maps, is_proc_myself },
8236         { "stat", open_self_stat, is_proc_myself },
8237         { "auxv", open_self_auxv, is_proc_myself },
8238         { "cmdline", open_self_cmdline, is_proc_myself },
8239 #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
8240         { "/proc/net/route", open_net_route, is_proc },
8241 #endif
8242 #if defined(TARGET_SPARC) || defined(TARGET_HPPA)
8243         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8244 #endif
8245 #if defined(TARGET_M68K)
8246         { "/proc/hardware", open_hardware, is_proc },
8247 #endif
8248         { NULL, NULL, NULL }
8249     };
8250 
8251     if (is_proc_myself(pathname, "exe")) {
8252         int execfd = qemu_getauxval(AT_EXECFD);
8253         return execfd ? execfd : safe_openat(dirfd, exec_path, flags, mode);
8254     }
8255 
8256     for (fake_open = fakes; fake_open->filename; fake_open++) {
8257         if (fake_open->cmp(pathname, fake_open->filename)) {
8258             break;
8259         }
8260     }
8261 
8262     if (fake_open->filename) {
8263         const char *tmpdir;
8264         char filename[PATH_MAX];
8265         int fd, r;
8266 
8267         /* create a temporary file to hold the generated contents */
8268         tmpdir = getenv("TMPDIR");
8269         if (!tmpdir)
8270             tmpdir = "/tmp";
8271         snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8272         fd = mkstemp(filename);
8273         if (fd < 0) {
8274             return fd;
8275         }
8276         unlink(filename);
8277 
8278         if ((r = fake_open->fill(cpu_env, fd))) {
8279             int e = errno;
8280             close(fd);
8281             errno = e;
8282             return r;
8283         }
8284         lseek(fd, 0, SEEK_SET);
8285 
8286         return fd;
8287     }
8288 
8289     return safe_openat(dirfd, path(pathname), flags, mode);
8290 }
8291 
8292 #define TIMER_MAGIC 0x0caf0000
8293 #define TIMER_MAGIC_MASK 0xffff0000
8294 
8295 /* Convert a QEMU-provided timer ID back to the internal 16-bit index format */
8296 static target_timer_t get_timer_id(abi_long arg)
8297 {
8298     target_timer_t timerid = arg;
8299 
8300     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8301         return -TARGET_EINVAL;
8302     }
8303 
8304     timerid &= 0xffff;
8305 
8306     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8307         return -TARGET_EINVAL;
8308     }
8309 
8310     return timerid;
8311 }
8312 
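/*
 * Convert a guest CPU affinity mask (an array of abi_ulong) at target_addr
 * into a host cpu mask, bit by bit.
 */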
8313 static int target_to_host_cpu_mask(unsigned long *host_mask,
8314                                    size_t host_size,
8315                                    abi_ulong target_addr,
8316                                    size_t target_size)
8317 {
8318     unsigned target_bits = sizeof(abi_ulong) * 8;
8319     unsigned host_bits = sizeof(*host_mask) * 8;
8320     abi_ulong *target_mask;
8321     unsigned i, j;
8322 
8323     assert(host_size >= target_size);
8324 
8325     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8326     if (!target_mask) {
8327         return -TARGET_EFAULT;
8328     }
8329     memset(host_mask, 0, host_size);
8330 
8331     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8332         unsigned bit = i * target_bits;
8333         abi_ulong val;
8334 
8335         __get_user(val, &target_mask[i]);
8336         for (j = 0; j < target_bits; j++, bit++) {
8337             if (val & (1UL << j)) {
8338                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8339             }
8340         }
8341     }
8342 
8343     unlock_user(target_mask, target_addr, 0);
8344     return 0;
8345 }
8346 
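/*
 * Inverse of target_to_host_cpu_mask(): copy a host CPU mask into a guest
 * abi_ulong array at target_addr.
 */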
8347 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8348                                    size_t host_size,
8349                                    abi_ulong target_addr,
8350                                    size_t target_size)
8351 {
8352     unsigned target_bits = sizeof(abi_ulong) * 8;
8353     unsigned host_bits = sizeof(*host_mask) * 8;
8354     abi_ulong *target_mask;
8355     unsigned i, j;
8356 
8357     assert(host_size >= target_size);
8358 
8359     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8360     if (!target_mask) {
8361         return -TARGET_EFAULT;
8362     }
8363 
8364     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8365         unsigned bit = i * target_bits;
8366         abi_ulong val = 0;
8367 
8368         for (j = 0; j < target_bits; j++, bit++) {
8369             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8370                 val |= 1UL << j;
8371             }
8372         }
8373         __put_user(val, &target_mask[i]);
8374     }
8375 
8376     unlock_user(target_mask, target_addr, target_size);
8377     return 0;
8378 }
8379 
8380 #ifdef TARGET_NR_getdents
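/*
 * Implement getdents: read host directory entries into a bounce buffer and
 * repack them into the guest's struct target_dirent layout.  If the
 * repacked records do not all fit in the guest buffer, the directory
 * offset is rewound to the first record not returned.
 */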
8381 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8382 {
8383     g_autofree void *hdirp = NULL;
8384     void *tdirp;
8385     int hlen, hoff, toff;
8386     int hreclen, treclen;
8387     off64_t prev_diroff = 0;
8388 
8389     hdirp = g_try_malloc(count);
8390     if (!hdirp) {
8391         return -TARGET_ENOMEM;
8392     }
8393 
8394 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8395     hlen = sys_getdents(dirfd, hdirp, count);
8396 #else
8397     hlen = sys_getdents64(dirfd, hdirp, count);
8398 #endif
8399 
8400     hlen = get_errno(hlen);
8401     if (is_error(hlen)) {
8402         return hlen;
8403     }
8404 
8405     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8406     if (!tdirp) {
8407         return -TARGET_EFAULT;
8408     }
8409 
8410     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8411 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8412         struct linux_dirent *hde = hdirp + hoff;
8413 #else
8414         struct linux_dirent64 *hde = hdirp + hoff;
8415 #endif
8416         struct target_dirent *tde = tdirp + toff;
8417         int namelen;
8418         uint8_t type;
8419 
8420         namelen = strlen(hde->d_name);
8421         hreclen = hde->d_reclen;
8422         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8423         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8424 
8425         if (toff + treclen > count) {
8426             /*
8427              * If the host struct is smaller than the target struct, or
8428              * requires less alignment and thus packs into less space,
8429              * then the host can return more entries than we can pass
8430              * on to the guest.
8431              */
8432             if (toff == 0) {
8433                 toff = -TARGET_EINVAL; /* result buffer is too small */
8434                 break;
8435             }
8436             /*
8437              * Return what we have, resetting the file pointer to the
8438              * location of the first record not returned.
8439              */
8440             lseek64(dirfd, prev_diroff, SEEK_SET);
8441             break;
8442         }
8443 
8444         prev_diroff = hde->d_off;
8445         tde->d_ino = tswapal(hde->d_ino);
8446         tde->d_off = tswapal(hde->d_off);
8447         tde->d_reclen = tswap16(treclen);
8448         memcpy(tde->d_name, hde->d_name, namelen + 1);
8449 
8450         /*
8451          * The getdents type is in what was formerly a padding byte at the
8452          * end of the structure.
8453          */
8454 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8455         type = *((uint8_t *)hde + hreclen - 1);
8456 #else
8457         type = hde->d_type;
8458 #endif
8459         *((uint8_t *)tde + treclen - 1) = type;
8460     }
8461 
8462     unlock_user(tdirp, arg2, toff);
8463     return toff;
8464 }
8465 #endif /* TARGET_NR_getdents */
8466 
8467 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
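/*
 * As do_getdents(), but producing struct target_dirent64 records from the
 * host getdents64() results.
 */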
8468 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8469 {
8470     g_autofree void *hdirp = NULL;
8471     void *tdirp;
8472     int hlen, hoff, toff;
8473     int hreclen, treclen;
8474     off64_t prev_diroff = 0;
8475 
8476     hdirp = g_try_malloc(count);
8477     if (!hdirp) {
8478         return -TARGET_ENOMEM;
8479     }
8480 
8481     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8482     if (is_error(hlen)) {
8483         return hlen;
8484     }
8485 
8486     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8487     if (!tdirp) {
8488         return -TARGET_EFAULT;
8489     }
8490 
8491     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8492         struct linux_dirent64 *hde = hdirp + hoff;
8493         struct target_dirent64 *tde = tdirp + toff;
8494         int namelen;
8495 
8496         namelen = strlen(hde->d_name) + 1;
8497         hreclen = hde->d_reclen;
8498         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8499         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8500 
8501         if (toff + treclen > count) {
8502             /*
8503              * If the host struct is smaller than the target struct, or
8504              * requires less alignment and thus packs into less space,
8505              * then the host can return more entries than we can pass
8506              * on to the guest.
8507              */
8508             if (toff == 0) {
8509                 toff = -TARGET_EINVAL; /* result buffer is too small */
8510                 break;
8511             }
8512             /*
8513              * Return what we have, resetting the file pointer to the
8514              * location of the first record not returned.
8515              */
8516             lseek64(dirfd, prev_diroff, SEEK_SET);
8517             break;
8518         }
8519 
8520         prev_diroff = hde->d_off;
8521         tde->d_ino = tswap64(hde->d_ino);
8522         tde->d_off = tswap64(hde->d_off);
8523         tde->d_reclen = tswap16(treclen);
8524         tde->d_type = hde->d_type;
8525         memcpy(tde->d_name, hde->d_name, namelen);
8526     }
8527 
8528     unlock_user(tdirp, arg2, toff);
8529     return toff;
8530 }
8531 #endif /* TARGET_NR_getdents64 */
8532 
8533 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
8534 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
8535 #endif
8536 
8537 /* This is an internal helper for do_syscall so that it is easier
8538  * to have a single return point, which allows actions such as logging
8539  * of syscall results to be performed.
8540  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
8541  */
8542 static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1,
8543                             abi_long arg2, abi_long arg3, abi_long arg4,
8544                             abi_long arg5, abi_long arg6, abi_long arg7,
8545                             abi_long arg8)
8546 {
8547     CPUState *cpu = env_cpu(cpu_env);
8548     abi_long ret;
8549 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
8550     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
8551     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
8552     || defined(TARGET_NR_statx)
8553     struct stat st;
8554 #endif
8555 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
8556     || defined(TARGET_NR_fstatfs)
8557     struct statfs stfs;
8558 #endif
8559     void *p;
8560 
8561     switch(num) {
8562     case TARGET_NR_exit:
8563         /* In old applications this may be used to implement _exit(2).
8564            However, in threaded applications it is used for thread termination,
8565            and _exit_group is used for application termination.
8566            Do thread termination if we have more than one thread.  */
8567 
8568         if (block_signals()) {
8569             return -QEMU_ERESTARTSYS;
8570         }
8571 
8572         pthread_mutex_lock(&clone_lock);
8573 
8574         if (CPU_NEXT(first_cpu)) {
8575             TaskState *ts = cpu->opaque;
8576 
8577             object_property_set_bool(OBJECT(cpu), "realized", false, NULL);
8578             object_unref(OBJECT(cpu));
8579             /*
8580              * At this point the CPU should be unrealized and removed
8581              * from the cpu lists.  We can clean up the rest of the thread
8582              * data without the lock held.
8583              */
8584 
8585             pthread_mutex_unlock(&clone_lock);
8586 
8587             if (ts->child_tidptr) {
8588                 put_user_u32(0, ts->child_tidptr);
8589                 do_sys_futex(g2h(cpu, ts->child_tidptr),
8590                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
8591             }
8592             thread_cpu = NULL;
8593             g_free(ts);
8594             rcu_unregister_thread();
8595             pthread_exit(NULL);
8596         }
8597 
8598         pthread_mutex_unlock(&clone_lock);
8599         preexit_cleanup(cpu_env, arg1);
8600         _exit(arg1);
8601         return 0; /* avoid warning */
8602     case TARGET_NR_read:
8603         if (arg2 == 0 && arg3 == 0) {
8604             return get_errno(safe_read(arg1, 0, 0));
8605         } else {
8606             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
8607                 return -TARGET_EFAULT;
8608             ret = get_errno(safe_read(arg1, p, arg3));
8609             if (ret >= 0 &&
8610                 fd_trans_host_to_target_data(arg1)) {
8611                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
8612             }
8613             unlock_user(p, arg2, ret);
8614         }
8615         return ret;
8616     case TARGET_NR_write:
8617         if (arg2 == 0 && arg3 == 0) {
8618             return get_errno(safe_write(arg1, 0, 0));
8619         }
8620         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
8621             return -TARGET_EFAULT;
8622         if (fd_trans_target_to_host_data(arg1)) {
8623             void *copy = g_malloc(arg3);
8624             memcpy(copy, p, arg3);
8625             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
8626             if (ret >= 0) {
8627                 ret = get_errno(safe_write(arg1, copy, ret));
8628             }
8629             g_free(copy);
8630         } else {
8631             ret = get_errno(safe_write(arg1, p, arg3));
8632         }
8633         unlock_user(p, arg2, 0);
8634         return ret;
8635 
8636 #ifdef TARGET_NR_open
8637     case TARGET_NR_open:
8638         if (!(p = lock_user_string(arg1)))
8639             return -TARGET_EFAULT;
8640         ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
8641                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
8642                                   arg3));
8643         fd_trans_unregister(ret);
8644         unlock_user(p, arg1, 0);
8645         return ret;
8646 #endif
8647     case TARGET_NR_openat:
8648         if (!(p = lock_user_string(arg2)))
8649             return -TARGET_EFAULT;
8650         ret = get_errno(do_openat(cpu_env, arg1, p,
8651                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
8652                                   arg4));
8653         fd_trans_unregister(ret);
8654         unlock_user(p, arg2, 0);
8655         return ret;
8656 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8657     case TARGET_NR_name_to_handle_at:
8658         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
8659         return ret;
8660 #endif
8661 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
8662     case TARGET_NR_open_by_handle_at:
8663         ret = do_open_by_handle_at(arg1, arg2, arg3);
8664         fd_trans_unregister(ret);
8665         return ret;
8666 #endif
8667     case TARGET_NR_close:
8668         fd_trans_unregister(arg1);
8669         return get_errno(close(arg1));
8670 
8671     case TARGET_NR_brk:
8672         return do_brk(arg1);
8673 #ifdef TARGET_NR_fork
8674     case TARGET_NR_fork:
8675         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
8676 #endif
8677 #ifdef TARGET_NR_waitpid
8678     case TARGET_NR_waitpid:
8679         {
8680             int status;
8681             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
8682             if (!is_error(ret) && arg2 && ret
8683                 && put_user_s32(host_to_target_waitstatus(status), arg2))
8684                 return -TARGET_EFAULT;
8685         }
8686         return ret;
8687 #endif
8688 #ifdef TARGET_NR_waitid
8689     case TARGET_NR_waitid:
8690         {
8691             siginfo_t info;
8692             info.si_pid = 0;
8693             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
8694             if (!is_error(ret) && arg3 && info.si_pid != 0) {
8695                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
8696                     return -TARGET_EFAULT;
8697                 host_to_target_siginfo(p, &info);
8698                 unlock_user(p, arg3, sizeof(target_siginfo_t));
8699             }
8700         }
8701         return ret;
8702 #endif
8703 #ifdef TARGET_NR_creat /* not on alpha */
8704     case TARGET_NR_creat:
8705         if (!(p = lock_user_string(arg1)))
8706             return -TARGET_EFAULT;
8707         ret = get_errno(creat(p, arg2));
8708         fd_trans_unregister(ret);
8709         unlock_user(p, arg1, 0);
8710         return ret;
8711 #endif
8712 #ifdef TARGET_NR_link
8713     case TARGET_NR_link:
8714         {
8715             void * p2;
8716             p = lock_user_string(arg1);
8717             p2 = lock_user_string(arg2);
8718             if (!p || !p2)
8719                 ret = -TARGET_EFAULT;
8720             else
8721                 ret = get_errno(link(p, p2));
8722             unlock_user(p2, arg2, 0);
8723             unlock_user(p, arg1, 0);
8724         }
8725         return ret;
8726 #endif
8727 #if defined(TARGET_NR_linkat)
8728     case TARGET_NR_linkat:
8729         {
8730             void * p2 = NULL;
8731             if (!arg2 || !arg4)
8732                 return -TARGET_EFAULT;
8733             p  = lock_user_string(arg2);
8734             p2 = lock_user_string(arg4);
8735             if (!p || !p2)
8736                 ret = -TARGET_EFAULT;
8737             else
8738                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
8739             unlock_user(p, arg2, 0);
8740             unlock_user(p2, arg4, 0);
8741         }
8742         return ret;
8743 #endif
8744 #ifdef TARGET_NR_unlink
8745     case TARGET_NR_unlink:
8746         if (!(p = lock_user_string(arg1)))
8747             return -TARGET_EFAULT;
8748         ret = get_errno(unlink(p));
8749         unlock_user(p, arg1, 0);
8750         return ret;
8751 #endif
8752 #if defined(TARGET_NR_unlinkat)
8753     case TARGET_NR_unlinkat:
8754         if (!(p = lock_user_string(arg2)))
8755             return -TARGET_EFAULT;
8756         ret = get_errno(unlinkat(arg1, p, arg3));
8757         unlock_user(p, arg2, 0);
8758         return ret;
8759 #endif
8760     case TARGET_NR_execve:
8761         {
8762             char **argp, **envp;
8763             int argc, envc;
8764             abi_ulong gp;
8765             abi_ulong guest_argp;
8766             abi_ulong guest_envp;
8767             abi_ulong addr;
8768             char **q;
8769 
8770             argc = 0;
8771             guest_argp = arg2;
8772             for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8773                 if (get_user_ual(addr, gp))
8774                     return -TARGET_EFAULT;
8775                 if (!addr)
8776                     break;
8777                 argc++;
8778             }
8779             envc = 0;
8780             guest_envp = arg3;
8781             for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8782                 if (get_user_ual(addr, gp))
8783                     return -TARGET_EFAULT;
8784                 if (!addr)
8785                     break;
8786                 envc++;
8787             }
8788 
8789             argp = g_new0(char *, argc + 1);
8790             envp = g_new0(char *, envc + 1);
8791 
8792             for (gp = guest_argp, q = argp; gp;
8793                   gp += sizeof(abi_ulong), q++) {
8794                 if (get_user_ual(addr, gp))
8795                     goto execve_efault;
8796                 if (!addr)
8797                     break;
8798                 if (!(*q = lock_user_string(addr)))
8799                     goto execve_efault;
8800             }
8801             *q = NULL;
8802 
8803             for (gp = guest_envp, q = envp; gp;
8804                   gp += sizeof(abi_ulong), q++) {
8805                 if (get_user_ual(addr, gp))
8806                     goto execve_efault;
8807                 if (!addr)
8808                     break;
8809                 if (!(*q = lock_user_string(addr)))
8810                     goto execve_efault;
8811             }
8812             *q = NULL;
8813 
8814             if (!(p = lock_user_string(arg1)))
8815                 goto execve_efault;
8816             /* Although execve() is not an interruptible syscall it is
8817              * a special case where we must use the safe_syscall wrapper:
8818              * if we allow a signal to happen before we make the host
8819              * syscall then we will 'lose' it, because at the point of
8820              * execve the process leaves QEMU's control. So we use the
8821              * safe syscall wrapper to ensure that we either take the
8822              * signal as a guest signal, or else it does not happen
8823              * before the execve completes and makes it the other
8824              * program's problem.
8825              */
8826             ret = get_errno(safe_execve(p, argp, envp));
8827             unlock_user(p, arg1, 0);
8828 
8829             goto execve_end;
8830 
8831         execve_efault:
8832             ret = -TARGET_EFAULT;
8833 
8834         execve_end:
8835             for (gp = guest_argp, q = argp; *q;
8836                   gp += sizeof(abi_ulong), q++) {
8837                 if (get_user_ual(addr, gp)
8838                     || !addr)
8839                     break;
8840                 unlock_user(*q, addr, 0);
8841             }
8842             for (gp = guest_envp, q = envp; *q;
8843                   gp += sizeof(abi_ulong), q++) {
8844                 if (get_user_ual(addr, gp)
8845                     || !addr)
8846                     break;
8847                 unlock_user(*q, addr, 0);
8848             }
8849 
8850             g_free(argp);
8851             g_free(envp);
8852         }
8853         return ret;
8854     case TARGET_NR_chdir:
8855         if (!(p = lock_user_string(arg1)))
8856             return -TARGET_EFAULT;
8857         ret = get_errno(chdir(p));
8858         unlock_user(p, arg1, 0);
8859         return ret;
8860 #ifdef TARGET_NR_time
8861     case TARGET_NR_time:
8862         {
8863             time_t host_time;
8864             ret = get_errno(time(&host_time));
8865             if (!is_error(ret)
8866                 && arg1
8867                 && put_user_sal(host_time, arg1))
8868                 return -TARGET_EFAULT;
8869         }
8870         return ret;
8871 #endif
8872 #ifdef TARGET_NR_mknod
8873     case TARGET_NR_mknod:
8874         if (!(p = lock_user_string(arg1)))
8875             return -TARGET_EFAULT;
8876         ret = get_errno(mknod(p, arg2, arg3));
8877         unlock_user(p, arg1, 0);
8878         return ret;
8879 #endif
8880 #if defined(TARGET_NR_mknodat)
8881     case TARGET_NR_mknodat:
8882         if (!(p = lock_user_string(arg2)))
8883             return -TARGET_EFAULT;
8884         ret = get_errno(mknodat(arg1, p, arg3, arg4));
8885         unlock_user(p, arg2, 0);
8886         return ret;
8887 #endif
8888 #ifdef TARGET_NR_chmod
8889     case TARGET_NR_chmod:
8890         if (!(p = lock_user_string(arg1)))
8891             return -TARGET_EFAULT;
8892         ret = get_errno(chmod(p, arg2));
8893         unlock_user(p, arg1, 0);
8894         return ret;
8895 #endif
8896 #ifdef TARGET_NR_lseek
8897     case TARGET_NR_lseek:
8898         return get_errno(lseek(arg1, arg2, arg3));
8899 #endif
8900 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
8901     /* Alpha specific */
8902     case TARGET_NR_getxpid:
8903         ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
8904         return get_errno(getpid());
8905 #endif
8906 #ifdef TARGET_NR_getpid
8907     case TARGET_NR_getpid:
8908         return get_errno(getpid());
8909 #endif
8910     case TARGET_NR_mount:
8911         {
8912             /* need to look at the data field */
8913             void *p2, *p3;
8914 
8915             if (arg1) {
8916                 p = lock_user_string(arg1);
8917                 if (!p) {
8918                     return -TARGET_EFAULT;
8919                 }
8920             } else {
8921                 p = NULL;
8922             }
8923 
8924             p2 = lock_user_string(arg2);
8925             if (!p2) {
8926                 if (arg1) {
8927                     unlock_user(p, arg1, 0);
8928                 }
8929                 return -TARGET_EFAULT;
8930             }
8931 
8932             if (arg3) {
8933                 p3 = lock_user_string(arg3);
8934                 if (!p3) {
8935                     if (arg1) {
8936                         unlock_user(p, arg1, 0);
8937                     }
8938                     unlock_user(p2, arg2, 0);
8939                     return -TARGET_EFAULT;
8940                 }
8941             } else {
8942                 p3 = NULL;
8943             }
8944 
8945             /* FIXME - arg5 should be locked, but it isn't clear how to
8946              * do that since it's not guaranteed to be a NULL-terminated
8947              * string.
8948              */
8949             if (!arg5) {
8950                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
8951             } else {
8952                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
8953             }
8954             ret = get_errno(ret);
8955 
8956             if (arg1) {
8957                 unlock_user(p, arg1, 0);
8958             }
8959             unlock_user(p2, arg2, 0);
8960             if (arg3) {
8961                 unlock_user(p3, arg3, 0);
8962             }
8963         }
8964         return ret;
8965 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
8966 #if defined(TARGET_NR_umount)
8967     case TARGET_NR_umount:
8968 #endif
8969 #if defined(TARGET_NR_oldumount)
8970     case TARGET_NR_oldumount:
8971 #endif
8972         if (!(p = lock_user_string(arg1)))
8973             return -TARGET_EFAULT;
8974         ret = get_errno(umount(p));
8975         unlock_user(p, arg1, 0);
8976         return ret;
8977 #endif
8978 #ifdef TARGET_NR_stime /* not on alpha */
8979     case TARGET_NR_stime:
8980         {
8981             struct timespec ts;
8982             ts.tv_nsec = 0;
8983             if (get_user_sal(ts.tv_sec, arg1)) {
8984                 return -TARGET_EFAULT;
8985             }
8986             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
8987         }
8988 #endif
8989 #ifdef TARGET_NR_alarm /* not on alpha */
8990     case TARGET_NR_alarm:
8991         return alarm(arg1);
8992 #endif
8993 #ifdef TARGET_NR_pause /* not on alpha */
8994     case TARGET_NR_pause:
8995         if (!block_signals()) {
8996             sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
8997         }
8998         return -TARGET_EINTR;
8999 #endif
9000 #ifdef TARGET_NR_utime
9001     case TARGET_NR_utime:
9002         {
9003             struct utimbuf tbuf, *host_tbuf;
9004             struct target_utimbuf *target_tbuf;
9005             if (arg2) {
9006                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9007                     return -TARGET_EFAULT;
9008                 tbuf.actime = tswapal(target_tbuf->actime);
9009                 tbuf.modtime = tswapal(target_tbuf->modtime);
9010                 unlock_user_struct(target_tbuf, arg2, 0);
9011                 host_tbuf = &tbuf;
9012             } else {
9013                 host_tbuf = NULL;
9014             }
9015             if (!(p = lock_user_string(arg1)))
9016                 return -TARGET_EFAULT;
9017             ret = get_errno(utime(p, host_tbuf));
9018             unlock_user(p, arg1, 0);
9019         }
9020         return ret;
9021 #endif
9022 #ifdef TARGET_NR_utimes
9023     case TARGET_NR_utimes:
9024         {
9025             struct timeval *tvp, tv[2];
9026             if (arg2) {
9027                 if (copy_from_user_timeval(&tv[0], arg2)
9028                     || copy_from_user_timeval(&tv[1],
9029                                               arg2 + sizeof(struct target_timeval)))
9030                     return -TARGET_EFAULT;
9031                 tvp = tv;
9032             } else {
9033                 tvp = NULL;
9034             }
9035             if (!(p = lock_user_string(arg1)))
9036                 return -TARGET_EFAULT;
9037             ret = get_errno(utimes(p, tvp));
9038             unlock_user(p, arg1, 0);
9039         }
9040         return ret;
9041 #endif
9042 #if defined(TARGET_NR_futimesat)
9043     case TARGET_NR_futimesat:
9044         {
9045             struct timeval *tvp, tv[2];
9046             if (arg3) {
9047                 if (copy_from_user_timeval(&tv[0], arg3)
9048                     || copy_from_user_timeval(&tv[1],
9049                                               arg3 + sizeof(struct target_timeval)))
9050                     return -TARGET_EFAULT;
9051                 tvp = tv;
9052             } else {
9053                 tvp = NULL;
9054             }
9055             if (!(p = lock_user_string(arg2))) {
9056                 return -TARGET_EFAULT;
9057             }
9058             ret = get_errno(futimesat(arg1, path(p), tvp));
9059             unlock_user(p, arg2, 0);
9060         }
9061         return ret;
9062 #endif
9063 #ifdef TARGET_NR_access
9064     case TARGET_NR_access:
9065         if (!(p = lock_user_string(arg1))) {
9066             return -TARGET_EFAULT;
9067         }
9068         ret = get_errno(access(path(p), arg2));
9069         unlock_user(p, arg1, 0);
9070         return ret;
9071 #endif
9072 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9073     case TARGET_NR_faccessat:
9074         if (!(p = lock_user_string(arg2))) {
9075             return -TARGET_EFAULT;
9076         }
9077         ret = get_errno(faccessat(arg1, p, arg3, 0));
9078         unlock_user(p, arg2, 0);
9079         return ret;
9080 #endif
9081 #ifdef TARGET_NR_nice /* not on alpha */
9082     case TARGET_NR_nice:
9083         return get_errno(nice(arg1));
9084 #endif
9085     case TARGET_NR_sync:
9086         sync();
9087         return 0;
9088 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9089     case TARGET_NR_syncfs:
9090         return get_errno(syncfs(arg1));
9091 #endif
9092     case TARGET_NR_kill:
9093         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9094 #ifdef TARGET_NR_rename
9095     case TARGET_NR_rename:
9096         {
9097             void *p2;
9098             p = lock_user_string(arg1);
9099             p2 = lock_user_string(arg2);
9100             if (!p || !p2)
9101                 ret = -TARGET_EFAULT;
9102             else
9103                 ret = get_errno(rename(p, p2));
9104             unlock_user(p2, arg2, 0);
9105             unlock_user(p, arg1, 0);
9106         }
9107         return ret;
9108 #endif
9109 #if defined(TARGET_NR_renameat)
9110     case TARGET_NR_renameat:
9111         {
9112             void *p2;
9113             p  = lock_user_string(arg2);
9114             p2 = lock_user_string(arg4);
9115             if (!p || !p2)
9116                 ret = -TARGET_EFAULT;
9117             else
9118                 ret = get_errno(renameat(arg1, p, arg3, p2));
9119             unlock_user(p2, arg4, 0);
9120             unlock_user(p, arg2, 0);
9121         }
9122         return ret;
9123 #endif
9124 #if defined(TARGET_NR_renameat2)
9125     case TARGET_NR_renameat2:
9126         {
9127             void *p2;
9128             p  = lock_user_string(arg2);
9129             p2 = lock_user_string(arg4);
9130             if (!p || !p2) {
9131                 ret = -TARGET_EFAULT;
9132             } else {
9133                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9134             }
9135             unlock_user(p2, arg4, 0);
9136             unlock_user(p, arg2, 0);
9137         }
9138         return ret;
9139 #endif
9140 #ifdef TARGET_NR_mkdir
9141     case TARGET_NR_mkdir:
9142         if (!(p = lock_user_string(arg1)))
9143             return -TARGET_EFAULT;
9144         ret = get_errno(mkdir(p, arg2));
9145         unlock_user(p, arg1, 0);
9146         return ret;
9147 #endif
9148 #if defined(TARGET_NR_mkdirat)
9149     case TARGET_NR_mkdirat:
9150         if (!(p = lock_user_string(arg2)))
9151             return -TARGET_EFAULT;
9152         ret = get_errno(mkdirat(arg1, p, arg3));
9153         unlock_user(p, arg2, 0);
9154         return ret;
9155 #endif
9156 #ifdef TARGET_NR_rmdir
9157     case TARGET_NR_rmdir:
9158         if (!(p = lock_user_string(arg1)))
9159             return -TARGET_EFAULT;
9160         ret = get_errno(rmdir(p));
9161         unlock_user(p, arg1, 0);
9162         return ret;
9163 #endif
9164     case TARGET_NR_dup:
9165         ret = get_errno(dup(arg1));
9166         if (ret >= 0) {
9167             fd_trans_dup(arg1, ret);
9168         }
9169         return ret;
9170 #ifdef TARGET_NR_pipe
9171     case TARGET_NR_pipe:
9172         return do_pipe(cpu_env, arg1, 0, 0);
9173 #endif
9174 #ifdef TARGET_NR_pipe2
9175     case TARGET_NR_pipe2:
9176         return do_pipe(cpu_env, arg1,
9177                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9178 #endif
9179     case TARGET_NR_times:
9180         {
9181             struct target_tms *tmsp;
9182             struct tms tms;
9183             ret = get_errno(times(&tms));
9184             if (arg1) {
9185                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9186                 if (!tmsp)
9187                     return -TARGET_EFAULT;
9188                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9189                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9190                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9191                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9192             }
9193             if (!is_error(ret))
9194                 ret = host_to_target_clock_t(ret);
9195         }
9196         return ret;
9197     case TARGET_NR_acct:
9198         if (arg1 == 0) {
9199             ret = get_errno(acct(NULL));
9200         } else {
9201             if (!(p = lock_user_string(arg1))) {
9202                 return -TARGET_EFAULT;
9203             }
9204             ret = get_errno(acct(path(p)));
9205             unlock_user(p, arg1, 0);
9206         }
9207         return ret;
9208 #ifdef TARGET_NR_umount2
9209     case TARGET_NR_umount2:
9210         if (!(p = lock_user_string(arg1)))
9211             return -TARGET_EFAULT;
9212         ret = get_errno(umount2(p, arg2));
9213         unlock_user(p, arg1, 0);
9214         return ret;
9215 #endif
9216     case TARGET_NR_ioctl:
9217         return do_ioctl(arg1, arg2, arg3);
9218 #ifdef TARGET_NR_fcntl
9219     case TARGET_NR_fcntl:
9220         return do_fcntl(arg1, arg2, arg3);
9221 #endif
9222     case TARGET_NR_setpgid:
9223         return get_errno(setpgid(arg1, arg2));
9224     case TARGET_NR_umask:
9225         return get_errno(umask(arg1));
9226     case TARGET_NR_chroot:
9227         if (!(p = lock_user_string(arg1)))
9228             return -TARGET_EFAULT;
9229         ret = get_errno(chroot(p));
9230         unlock_user(p, arg1, 0);
9231         return ret;
9232 #ifdef TARGET_NR_dup2
9233     case TARGET_NR_dup2:
9234         ret = get_errno(dup2(arg1, arg2));
9235         if (ret >= 0) {
9236             fd_trans_dup(arg1, arg2);
9237         }
9238         return ret;
9239 #endif
9240 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9241     case TARGET_NR_dup3:
9242     {
9243         int host_flags;
9244 
9245         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9246             return -TARGET_EINVAL;
9247         }
9248         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9249         ret = get_errno(dup3(arg1, arg2, host_flags));
9250         if (ret >= 0) {
9251             fd_trans_dup(arg1, arg2);
9252         }
9253         return ret;
9254     }
9255 #endif
9256 #ifdef TARGET_NR_getppid /* not on alpha */
9257     case TARGET_NR_getppid:
9258         return get_errno(getppid());
9259 #endif
9260 #ifdef TARGET_NR_getpgrp
9261     case TARGET_NR_getpgrp:
9262         return get_errno(getpgrp());
9263 #endif
9264     case TARGET_NR_setsid:
9265         return get_errno(setsid());
9266 #ifdef TARGET_NR_sigaction
9267     case TARGET_NR_sigaction:
9268         {
9269 #if defined(TARGET_MIPS)
9270             struct target_sigaction act, oact, *pact, *old_act;
9271 
9272             if (arg2) {
9273                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9274                     return -TARGET_EFAULT;
9275                 act._sa_handler = old_act->_sa_handler;
9276                 target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9277                 act.sa_flags = old_act->sa_flags;
9278                 unlock_user_struct(old_act, arg2, 0);
9279                 pact = &act;
9280             } else {
9281                 pact = NULL;
9282             }
9283 
9284             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9285 
9286             if (!is_error(ret) && arg3) {
9287                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9288                     return -TARGET_EFAULT;
9289                 old_act->_sa_handler = oact._sa_handler;
9290                 old_act->sa_flags = oact.sa_flags;
9291                 old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9292                 old_act->sa_mask.sig[1] = 0;
9293                 old_act->sa_mask.sig[2] = 0;
9294                 old_act->sa_mask.sig[3] = 0;
9295                 unlock_user_struct(old_act, arg3, 1);
9296             }
9297 #else
9298             struct target_old_sigaction *old_act;
9299             struct target_sigaction act, oact, *pact;
9300             if (arg2) {
9301                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9302                     return -TARGET_EFAULT;
9303                 act._sa_handler = old_act->_sa_handler;
9304                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9305                 act.sa_flags = old_act->sa_flags;
9306 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9307                 act.sa_restorer = old_act->sa_restorer;
9308 #endif
9309                 unlock_user_struct(old_act, arg2, 0);
9310                 pact = &act;
9311             } else {
9312                 pact = NULL;
9313             }
9314             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9315             if (!is_error(ret) && arg3) {
9316                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9317                     return -TARGET_EFAULT;
9318                 old_act->_sa_handler = oact._sa_handler;
9319                 old_act->sa_mask = oact.sa_mask.sig[0];
9320                 old_act->sa_flags = oact.sa_flags;
9321 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9322                 old_act->sa_restorer = oact.sa_restorer;
9323 #endif
9324                 unlock_user_struct(old_act, arg3, 1);
9325             }
9326 #endif
9327         }
9328         return ret;
9329 #endif
9330     case TARGET_NR_rt_sigaction:
9331         {
9332             /*
9333              * For Alpha and SPARC this is a 5 argument syscall, with
9334              * a 'restorer' parameter which must be copied into the
9335              * sa_restorer field of the sigaction struct.
9336              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9337              * and arg5 is the sigsetsize.
9338              */
9339 #if defined(TARGET_ALPHA)
9340             target_ulong sigsetsize = arg4;
9341             target_ulong restorer = arg5;
9342 #elif defined(TARGET_SPARC)
9343             target_ulong restorer = arg4;
9344             target_ulong sigsetsize = arg5;
9345 #else
9346             target_ulong sigsetsize = arg4;
9347             target_ulong restorer = 0;
9348 #endif
9349             struct target_sigaction *act = NULL;
9350             struct target_sigaction *oact = NULL;
9351 
9352             if (sigsetsize != sizeof(target_sigset_t)) {
9353                 return -TARGET_EINVAL;
9354             }
9355             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
9356                 return -TARGET_EFAULT;
9357             }
9358             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
9359                 ret = -TARGET_EFAULT;
9360             } else {
9361                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
9362                 if (oact) {
9363                     unlock_user_struct(oact, arg3, 1);
9364                 }
9365             }
9366             if (act) {
9367                 unlock_user_struct(act, arg2, 0);
9368             }
9369         }
9370         return ret;
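    /*
     * sgetmask/ssetmask operate on the legacy single-word signal mask, hence
     * the conversions below use the "old_sigset" helpers rather than the
     * full target_sigset_t ones.
     */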
9371 #ifdef TARGET_NR_sgetmask /* not on alpha */
9372     case TARGET_NR_sgetmask:
9373         {
9374             sigset_t cur_set;
9375             abi_ulong target_set;
9376             ret = do_sigprocmask(0, NULL, &cur_set);
9377             if (!ret) {
9378                 host_to_target_old_sigset(&target_set, &cur_set);
9379                 ret = target_set;
9380             }
9381         }
9382         return ret;
9383 #endif
9384 #ifdef TARGET_NR_ssetmask /* not on alpha */
9385     case TARGET_NR_ssetmask:
9386         {
9387             sigset_t set, oset;
9388             abi_ulong target_set = arg1;
9389             target_to_host_old_sigset(&set, &target_set);
9390             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
9391             if (!ret) {
9392                 host_to_target_old_sigset(&target_set, &oset);
9393                 ret = target_set;
9394             }
9395         }
9396         return ret;
9397 #endif
9398 #ifdef TARGET_NR_sigprocmask
9399     case TARGET_NR_sigprocmask:
9400         {
9401 #if defined(TARGET_ALPHA)
9402             sigset_t set, oldset;
9403             abi_ulong mask;
9404             int how;
9405 
9406             switch (arg1) {
9407             case TARGET_SIG_BLOCK:
9408                 how = SIG_BLOCK;
9409                 break;
9410             case TARGET_SIG_UNBLOCK:
9411                 how = SIG_UNBLOCK;
9412                 break;
9413             case TARGET_SIG_SETMASK:
9414                 how = SIG_SETMASK;
9415                 break;
9416             default:
9417                 return -TARGET_EINVAL;
9418             }
9419             mask = arg2;
9420             target_to_host_old_sigset(&set, &mask);
9421 
9422             ret = do_sigprocmask(how, &set, &oldset);
9423             if (!is_error(ret)) {
9424                 host_to_target_old_sigset(&mask, &oldset);
9425                 ret = mask;
9426                 ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
9427             }
9428 #else
9429             sigset_t set, oldset, *set_ptr;
9430             int how;
9431 
9432             if (arg2) {
9433                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9434                 if (!p) {
9435                     return -TARGET_EFAULT;
9436                 }
9437                 target_to_host_old_sigset(&set, p);
9438                 unlock_user(p, arg2, 0);
9439                 set_ptr = &set;
9440                 switch (arg1) {
9441                 case TARGET_SIG_BLOCK:
9442                     how = SIG_BLOCK;
9443                     break;
9444                 case TARGET_SIG_UNBLOCK:
9445                     how = SIG_UNBLOCK;
9446                     break;
9447                 case TARGET_SIG_SETMASK:
9448                     how = SIG_SETMASK;
9449                     break;
9450                 default:
9451                     return -TARGET_EINVAL;
9452                 }
9453             } else {
9454                 how = 0;
9455                 set_ptr = NULL;
9456             }
9457             ret = do_sigprocmask(how, set_ptr, &oldset);
9458             if (!is_error(ret) && arg3) {
9459                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9460                     return -TARGET_EFAULT;
9461                 host_to_target_old_sigset(p, &oldset);
9462                 unlock_user(p, arg3, sizeof(target_sigset_t));
9463             }
9464 #endif
9465         }
9466         return ret;
9467 #endif
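    /*
     * rt_sigprocmask carries an explicit sigsetsize (arg4) which must match
     * the target's sigset size, as in the kernel.  A null new-set pointer
     * means "query only", in which case the how value is ignored.
     */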
9468     case TARGET_NR_rt_sigprocmask:
9469         {
9470             int how = arg1;
9471             sigset_t set, oldset, *set_ptr;
9472 
9473             if (arg4 != sizeof(target_sigset_t)) {
9474                 return -TARGET_EINVAL;
9475             }
9476 
9477             if (arg2) {
9478                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
9479                 if (!p) {
9480                     return -TARGET_EFAULT;
9481                 }
9482                 target_to_host_sigset(&set, p);
9483                 unlock_user(p, arg2, 0);
9484                 set_ptr = &set;
9485                 switch(how) {
9486                 case TARGET_SIG_BLOCK:
9487                     how = SIG_BLOCK;
9488                     break;
9489                 case TARGET_SIG_UNBLOCK:
9490                     how = SIG_UNBLOCK;
9491                     break;
9492                 case TARGET_SIG_SETMASK:
9493                     how = SIG_SETMASK;
9494                     break;
9495                 default:
9496                     return -TARGET_EINVAL;
9497                 }
9498             } else {
9499                 how = 0;
9500                 set_ptr = NULL;
9501             }
9502             ret = do_sigprocmask(how, set_ptr, &oldset);
9503             if (!is_error(ret) && arg3) {
9504                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
9505                     return -TARGET_EFAULT;
9506                 host_to_target_sigset(p, &oldset);
9507                 unlock_user(p, arg3, sizeof(target_sigset_t));
9508             }
9509         }
9510         return ret;
9511 #ifdef TARGET_NR_sigpending
9512     case TARGET_NR_sigpending:
9513         {
9514             sigset_t set;
9515             ret = get_errno(sigpending(&set));
9516             if (!is_error(ret)) {
9517                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9518                     return -TARGET_EFAULT;
9519                 host_to_target_old_sigset(p, &set);
9520                 unlock_user(p, arg1, sizeof(target_sigset_t));
9521             }
9522         }
9523         return ret;
9524 #endif
9525     case TARGET_NR_rt_sigpending:
9526         {
9527             sigset_t set;
9528 
9529             /* Yes, this check is >, not != like most. We follow the kernel's
9530              * logic: it implements NR_sigpending through the same code
9531              * path, and in that case the old_sigset_t is smaller in size,
9532              * so only oversized values are rejected.
9533              */
9534             if (arg2 > sizeof(target_sigset_t)) {
9535                 return -TARGET_EINVAL;
9536             }
9537 
9538             ret = get_errno(sigpending(&set));
9539             if (!is_error(ret)) {
9540                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
9541                     return -TARGET_EFAULT;
9542                 host_to_target_sigset(p, &set);
9543                 unlock_user(p, arg1, sizeof(target_sigset_t));
9544             }
9545         }
9546         return ret;
9547 #ifdef TARGET_NR_sigsuspend
9548     case TARGET_NR_sigsuspend:
9549         {
9550             sigset_t *set;
9551 
9552 #if defined(TARGET_ALPHA)
9553             TaskState *ts = cpu->opaque;
9554             /* target_to_host_old_sigset will bswap back */
9555             abi_ulong mask = tswapal(arg1);
9556             set = &ts->sigsuspend_mask;
9557             target_to_host_old_sigset(set, &mask);
9558 #else
9559             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
9560             if (ret != 0) {
9561                 return ret;
9562             }
9563 #endif
9564             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9565             finish_sigsuspend_mask(ret);
9566         }
9567         return ret;
9568 #endif
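    /*
     * For rt_sigsuspend the guest mask is converted and stashed by
     * process_sigsuspend_mask() (in the TaskState, as the Alpha path above
     * suggests) and finish_sigsuspend_mask() completes the bookkeeping once
     * safe_rt_sigsuspend() returns.
     */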
9569     case TARGET_NR_rt_sigsuspend:
9570         {
9571             sigset_t *set;
9572 
9573             ret = process_sigsuspend_mask(&set, arg1, arg2);
9574             if (ret != 0) {
9575                 return ret;
9576             }
9577             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
9578             finish_sigsuspend_mask(ret);
9579         }
9580         return ret;
9581 #ifdef TARGET_NR_rt_sigtimedwait
9582     case TARGET_NR_rt_sigtimedwait:
9583         {
9584             sigset_t set;
9585             struct timespec uts, *puts;
9586             siginfo_t uinfo;
9587 
9588             if (arg4 != sizeof(target_sigset_t)) {
9589                 return -TARGET_EINVAL;
9590             }
9591 
9592             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
9593                 return -TARGET_EFAULT;
9594             target_to_host_sigset(&set, p);
9595             unlock_user(p, arg1, 0);
9596             if (arg3) {
9597                 puts = &uts;
9598                 if (target_to_host_timespec(puts, arg3)) {
9599                     return -TARGET_EFAULT;
9600                 }
9601             } else {
9602                 puts = NULL;
9603             }
9604             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9605                                                  SIGSET_T_SIZE));
9606             if (!is_error(ret)) {
9607                 if (arg2) {
9608                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
9609                                   0);
9610                     if (!p) {
9611                         return -TARGET_EFAULT;
9612                     }
9613                     host_to_target_siginfo(p, &uinfo);
9614                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9615                 }
9616                 ret = host_to_target_signal(ret);
9617             }
9618         }
9619         return ret;
9620 #endif
9621 #ifdef TARGET_NR_rt_sigtimedwait_time64
9622     case TARGET_NR_rt_sigtimedwait_time64:
9623         {
9624             sigset_t set;
9625             struct timespec uts, *puts;
9626             siginfo_t uinfo;
9627 
9628             if (arg4 != sizeof(target_sigset_t)) {
9629                 return -TARGET_EINVAL;
9630             }
9631 
9632             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
9633             if (!p) {
9634                 return -TARGET_EFAULT;
9635             }
9636             target_to_host_sigset(&set, p);
9637             unlock_user(p, arg1, 0);
9638             if (arg3) {
9639                 puts = &uts;
9640                 if (target_to_host_timespec64(puts, arg3)) {
9641                     return -TARGET_EFAULT;
9642                 }
9643             } else {
9644                 puts = NULL;
9645             }
9646             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
9647                                                  SIGSET_T_SIZE));
9648             if (!is_error(ret)) {
9649                 if (arg2) {
9650                     p = lock_user(VERIFY_WRITE, arg2,
9651                                   sizeof(target_siginfo_t), 0);
9652                     if (!p) {
9653                         return -TARGET_EFAULT;
9654                     }
9655                     host_to_target_siginfo(p, &uinfo);
9656                     unlock_user(p, arg2, sizeof(target_siginfo_t));
9657                 }
9658                 ret = host_to_target_signal(ret);
9659             }
9660         }
9661         return ret;
9662 #endif
9663     case TARGET_NR_rt_sigqueueinfo:
9664         {
9665             siginfo_t uinfo;
9666 
9667             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9668             if (!p) {
9669                 return -TARGET_EFAULT;
9670             }
9671             target_to_host_siginfo(&uinfo, p);
9672             unlock_user(p, arg3, 0);
9673             ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
9674         }
9675         return ret;
9676     case TARGET_NR_rt_tgsigqueueinfo:
9677         {
9678             siginfo_t uinfo;
9679 
9680             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
9681             if (!p) {
9682                 return -TARGET_EFAULT;
9683             }
9684             target_to_host_siginfo(&uinfo, p);
9685             unlock_user(p, arg4, 0);
9686             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, arg3, &uinfo));
9687         }
9688         return ret;
9689 #ifdef TARGET_NR_sigreturn
9690     case TARGET_NR_sigreturn:
9691         if (block_signals()) {
9692             return -QEMU_ERESTARTSYS;
9693         }
9694         return do_sigreturn(cpu_env);
9695 #endif
9696     case TARGET_NR_rt_sigreturn:
9697         if (block_signals()) {
9698             return -QEMU_ERESTARTSYS;
9699         }
9700         return do_rt_sigreturn(cpu_env);
9701     case TARGET_NR_sethostname:
9702         if (!(p = lock_user_string(arg1)))
9703             return -TARGET_EFAULT;
9704         ret = get_errno(sethostname(p, arg2));
9705         unlock_user(p, arg1, 0);
9706         return ret;
9707 #ifdef TARGET_NR_setrlimit
9708     case TARGET_NR_setrlimit:
9709         {
9710             int resource = target_to_host_resource(arg1);
9711             struct target_rlimit *target_rlim;
9712             struct rlimit rlim;
9713             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
9714                 return -TARGET_EFAULT;
9715             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
9716             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
9717             unlock_user_struct(target_rlim, arg2, 0);
9718             /*
9719              * If we just passed through resource limit settings for memory then
9720              * they would also apply to QEMU's own allocations, and QEMU will
9721              * crash or hang or die if its allocations fail. Ideally we would
9722              * track the guest allocations in QEMU and apply the limits ourselves.
9723              * For now, just tell the guest the call succeeded but don't actually
9724              * limit anything.
9725              */
9726             if (resource != RLIMIT_AS &&
9727                 resource != RLIMIT_DATA &&
9728                 resource != RLIMIT_STACK) {
9729                 return get_errno(setrlimit(resource, &rlim));
9730             } else {
9731                 return 0;
9732             }
9733         }
9734 #endif
9735 #ifdef TARGET_NR_getrlimit
9736     case TARGET_NR_getrlimit:
9737         {
9738             int resource = target_to_host_resource(arg1);
9739             struct target_rlimit *target_rlim;
9740             struct rlimit rlim;
9741 
9742             ret = get_errno(getrlimit(resource, &rlim));
9743             if (!is_error(ret)) {
9744                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
9745                     return -TARGET_EFAULT;
9746                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
9747                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
9748                 unlock_user_struct(target_rlim, arg2, 1);
9749             }
9750         }
9751         return ret;
9752 #endif
9753     case TARGET_NR_getrusage:
9754         {
9755             struct rusage rusage;
9756             ret = get_errno(getrusage(arg1, &rusage));
9757             if (!is_error(ret)) {
9758                 ret = host_to_target_rusage(arg2, &rusage);
9759             }
9760         }
9761         return ret;
9762 #if defined(TARGET_NR_gettimeofday)
9763     case TARGET_NR_gettimeofday:
9764         {
9765             struct timeval tv;
9766             struct timezone tz;
9767 
9768             ret = get_errno(gettimeofday(&tv, &tz));
9769             if (!is_error(ret)) {
9770                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
9771                     return -TARGET_EFAULT;
9772                 }
9773                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
9774                     return -TARGET_EFAULT;
9775                 }
9776             }
9777         }
9778         return ret;
9779 #endif
9780 #if defined(TARGET_NR_settimeofday)
9781     case TARGET_NR_settimeofday:
9782         {
9783             struct timeval tv, *ptv = NULL;
9784             struct timezone tz, *ptz = NULL;
9785 
9786             if (arg1) {
9787                 if (copy_from_user_timeval(&tv, arg1)) {
9788                     return -TARGET_EFAULT;
9789                 }
9790                 ptv = &tv;
9791             }
9792 
9793             if (arg2) {
9794                 if (copy_from_user_timezone(&tz, arg2)) {
9795                     return -TARGET_EFAULT;
9796                 }
9797                 ptz = &tz;
9798             }
9799 
9800             return get_errno(settimeofday(ptv, ptz));
9801         }
9802 #endif
9803 #if defined(TARGET_NR_select)
9804     case TARGET_NR_select:
9805 #if defined(TARGET_WANT_NI_OLD_SELECT)
9806         /* some architectures used to have old_select here
9807          * but now return ENOSYS for it.
9808          */
9809         ret = -TARGET_ENOSYS;
9810 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
9811         ret = do_old_select(arg1);
9812 #else
9813         ret = do_select(arg1, arg2, arg3, arg4, arg5);
9814 #endif
9815         return ret;
9816 #endif
9817 #ifdef TARGET_NR_pselect6
9818     case TARGET_NR_pselect6:
9819         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
9820 #endif
9821 #ifdef TARGET_NR_pselect6_time64
9822     case TARGET_NR_pselect6_time64:
9823         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
9824 #endif
9825 #ifdef TARGET_NR_symlink
9826     case TARGET_NR_symlink:
9827         {
9828             void *p2;
9829             p = lock_user_string(arg1);
9830             p2 = lock_user_string(arg2);
9831             if (!p || !p2)
9832                 ret = -TARGET_EFAULT;
9833             else
9834                 ret = get_errno(symlink(p, p2));
9835             unlock_user(p2, arg2, 0);
9836             unlock_user(p, arg1, 0);
9837         }
9838         return ret;
9839 #endif
9840 #if defined(TARGET_NR_symlinkat)
9841     case TARGET_NR_symlinkat:
9842         {
9843             void *p2;
9844             p  = lock_user_string(arg1);
9845             p2 = lock_user_string(arg3);
9846             if (!p || !p2)
9847                 ret = -TARGET_EFAULT;
9848             else
9849                 ret = get_errno(symlinkat(p, arg2, p2));
9850             unlock_user(p2, arg3, 0);
9851             unlock_user(p, arg1, 0);
9852         }
9853         return ret;
9854 #endif
9855 #ifdef TARGET_NR_readlink
9856     case TARGET_NR_readlink:
9857         {
9858             void *p2;
9859             p = lock_user_string(arg1);
9860             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
9861             if (!p || !p2) {
9862                 ret = -TARGET_EFAULT;
9863             } else if (!arg3) {
9864                 /* Short circuit this for the magic exe check. */
9865                 ret = -TARGET_EINVAL;
9866             } else if (is_proc_myself((const char *)p, "exe")) {
9867                 char real[PATH_MAX], *temp;
9868                 temp = realpath(exec_path, real);
9869                 /* Return value is # of bytes that we wrote to the buffer. */
9870                 if (temp == NULL) {
9871                     ret = get_errno(-1);
9872                 } else {
9873                     /* Don't worry about sign mismatch as earlier mapping
9874                      * logic would have thrown a bad address error. */
9875                     ret = MIN(strlen(real), arg3);
9876                     /* We cannot NUL terminate the string. */
9877                     memcpy(p2, real, ret);
9878                 }
9879             } else {
9880                 ret = get_errno(readlink(path(p), p2, arg3));
9881             }
9882             unlock_user(p2, arg2, ret);
9883             unlock_user(p, arg1, 0);
9884         }
9885         return ret;
9886 #endif
9887 #if defined(TARGET_NR_readlinkat)
9888     case TARGET_NR_readlinkat:
9889         {
9890             void *p2;
9891             p  = lock_user_string(arg2);
9892             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
9893             if (!p || !p2) {
9894                 ret = -TARGET_EFAULT;
9895             } else if (is_proc_myself((const char *)p, "exe")) {
9896                 char real[PATH_MAX], *temp;
9897                 temp = realpath(exec_path, real);
9898                 ret = temp == NULL ? get_errno(-1) : strlen(real);
9899                 snprintf((char *)p2, arg4, "%s", real);
9900             } else {
9901                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
9902             }
9903             unlock_user(p2, arg3, ret);
9904             unlock_user(p, arg2, 0);
9905         }
9906         return ret;
9907 #endif
9908 #ifdef TARGET_NR_swapon
9909     case TARGET_NR_swapon:
9910         if (!(p = lock_user_string(arg1)))
9911             return -TARGET_EFAULT;
9912         ret = get_errno(swapon(p, arg2));
9913         unlock_user(p, arg1, 0);
9914         return ret;
9915 #endif
9916     case TARGET_NR_reboot:
9917         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
9918            /* arg4 is only used with RESTART2 and must be ignored in all other cases */
9919            p = lock_user_string(arg4);
9920            if (!p) {
9921                return -TARGET_EFAULT;
9922            }
9923            ret = get_errno(reboot(arg1, arg2, arg3, p));
9924            unlock_user(p, arg4, 0);
9925         } else {
9926            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
9927         }
9928         return ret;
9929 #ifdef TARGET_NR_mmap
9930     case TARGET_NR_mmap:
9931 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
9932     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
9933     defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
9934     || defined(TARGET_S390X)
9935         {
9936             abi_ulong *v;
9937             abi_ulong v1, v2, v3, v4, v5, v6;
9938             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
9939                 return -TARGET_EFAULT;
9940             v1 = tswapal(v[0]);
9941             v2 = tswapal(v[1]);
9942             v3 = tswapal(v[2]);
9943             v4 = tswapal(v[3]);
9944             v5 = tswapal(v[4]);
9945             v6 = tswapal(v[5]);
9946             unlock_user(v, arg1, 0);
9947             ret = get_errno(target_mmap(v1, v2, v3,
9948                                         target_to_host_bitmask(v4, mmap_flags_tbl),
9949                                         v5, v6));
9950         }
9951 #else
9952         /* mmap pointers are always untagged */
9953         ret = get_errno(target_mmap(arg1, arg2, arg3,
9954                                     target_to_host_bitmask(arg4, mmap_flags_tbl),
9955                                     arg5,
9956                                     arg6));
9957 #endif
9958         return ret;
9959 #endif
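    /*
     * mmap2 takes its file offset in units of 1 << MMAP_SHIFT bytes (4096
     * unless the target overrides it), which lets 32-bit guests express file
     * offsets beyond 32 bits.
     */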
9960 #ifdef TARGET_NR_mmap2
9961     case TARGET_NR_mmap2:
9962 #ifndef MMAP_SHIFT
9963 #define MMAP_SHIFT 12
9964 #endif
9965         ret = target_mmap(arg1, arg2, arg3,
9966                           target_to_host_bitmask(arg4, mmap_flags_tbl),
9967                           arg5, arg6 << MMAP_SHIFT);
9968         return get_errno(ret);
9969 #endif
9970     case TARGET_NR_munmap:
9971         arg1 = cpu_untagged_addr(cpu, arg1);
9972         return get_errno(target_munmap(arg1, arg2));
9973     case TARGET_NR_mprotect:
9974         arg1 = cpu_untagged_addr(cpu, arg1);
9975         {
9976             TaskState *ts = cpu->opaque;
9977             /* Special hack to detect libc making the stack executable.  */
9978             if ((arg3 & PROT_GROWSDOWN)
9979                 && arg1 >= ts->info->stack_limit
9980                 && arg1 <= ts->info->start_stack) {
9981                 arg3 &= ~PROT_GROWSDOWN;
9982                 arg2 = arg2 + arg1 - ts->info->stack_limit;
9983                 arg1 = ts->info->stack_limit;
9984             }
9985         }
9986         return get_errno(target_mprotect(arg1, arg2, arg3));
9987 #ifdef TARGET_NR_mremap
9988     case TARGET_NR_mremap:
9989         arg1 = cpu_untagged_addr(cpu, arg1);
9990         /* mremap new_addr (arg5) is always untagged */
9991         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
9992 #endif
9993         /* ??? msync/mlock/munlock are broken for softmmu.  */
9994 #ifdef TARGET_NR_msync
9995     case TARGET_NR_msync:
9996         return get_errno(msync(g2h(cpu, arg1), arg2, arg3));
9997 #endif
9998 #ifdef TARGET_NR_mlock
9999     case TARGET_NR_mlock:
10000         return get_errno(mlock(g2h(cpu, arg1), arg2));
10001 #endif
10002 #ifdef TARGET_NR_munlock
10003     case TARGET_NR_munlock:
10004         return get_errno(munlock(g2h(cpu, arg1), arg2));
10005 #endif
10006 #ifdef TARGET_NR_mlockall
10007     case TARGET_NR_mlockall:
10008         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10009 #endif
10010 #ifdef TARGET_NR_munlockall
10011     case TARGET_NR_munlockall:
10012         return get_errno(munlockall());
10013 #endif
10014 #ifdef TARGET_NR_truncate
10015     case TARGET_NR_truncate:
10016         if (!(p = lock_user_string(arg1)))
10017             return -TARGET_EFAULT;
10018         ret = get_errno(truncate(p, arg2));
10019         unlock_user(p, arg1, 0);
10020         return ret;
10021 #endif
10022 #ifdef TARGET_NR_ftruncate
10023     case TARGET_NR_ftruncate:
10024         return get_errno(ftruncate(arg1, arg2));
10025 #endif
10026     case TARGET_NR_fchmod:
10027         return get_errno(fchmod(arg1, arg2));
10028 #if defined(TARGET_NR_fchmodat)
10029     case TARGET_NR_fchmodat:
10030         if (!(p = lock_user_string(arg2)))
10031             return -TARGET_EFAULT;
10032         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10033         unlock_user(p, arg2, 0);
10034         return ret;
10035 #endif
10036     case TARGET_NR_getpriority:
10037         /* Note that negative values are valid for getpriority, so we must
10038            differentiate based on errno settings.  */
10039         errno = 0;
10040         ret = getpriority(arg1, arg2);
10041         if (ret == -1 && errno != 0) {
10042             return -host_to_target_errno(errno);
10043         }
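        /*
         * The raw syscall returns the priority biased by 20 so it is never
         * negative (nice -20 reads back as 40, nice +19 as 1); host glibc has
         * already unbiased it, so most targets re-apply the bias below, while
         * Alpha hands back the unbiased value and just clears the error flag.
         */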
10044 #ifdef TARGET_ALPHA
10045         /* Return value is the unbiased priority.  Signal no error.  */
10046         ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
10047 #else
10048         /* Return value is a biased priority to avoid negative numbers.  */
10049         ret = 20 - ret;
10050 #endif
10051         return ret;
10052     case TARGET_NR_setpriority:
10053         return get_errno(setpriority(arg1, arg2, arg3));
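    /*
     * statfs and fstatfs differ only in how the host struct statfs is
     * obtained; the field-by-field conversion to the target layout is shared
     * via the convert_statfs label (and convert_statfs64 for the 64-bit
     * variants below).
     */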
10054 #ifdef TARGET_NR_statfs
10055     case TARGET_NR_statfs:
10056         if (!(p = lock_user_string(arg1))) {
10057             return -TARGET_EFAULT;
10058         }
10059         ret = get_errno(statfs(path(p), &stfs));
10060         unlock_user(p, arg1, 0);
10061     convert_statfs:
10062         if (!is_error(ret)) {
10063             struct target_statfs *target_stfs;
10064 
10065             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10066                 return -TARGET_EFAULT;
10067             __put_user(stfs.f_type, &target_stfs->f_type);
10068             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10069             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10070             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10071             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10072             __put_user(stfs.f_files, &target_stfs->f_files);
10073             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10074             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10075             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10076             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10077             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10078 #ifdef _STATFS_F_FLAGS
10079             __put_user(stfs.f_flags, &target_stfs->f_flags);
10080 #else
10081             __put_user(0, &target_stfs->f_flags);
10082 #endif
10083             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10084             unlock_user_struct(target_stfs, arg2, 1);
10085         }
10086         return ret;
10087 #endif
10088 #ifdef TARGET_NR_fstatfs
10089     case TARGET_NR_fstatfs:
10090         ret = get_errno(fstatfs(arg1, &stfs));
10091         goto convert_statfs;
10092 #endif
10093 #ifdef TARGET_NR_statfs64
10094     case TARGET_NR_statfs64:
10095         if (!(p = lock_user_string(arg1))) {
10096             return -TARGET_EFAULT;
10097         }
10098         ret = get_errno(statfs(path(p), &stfs));
10099         unlock_user(p, arg1, 0);
10100     convert_statfs64:
10101         if (!is_error(ret)) {
10102             struct target_statfs64 *target_stfs;
10103 
10104             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10105                 return -TARGET_EFAULT;
10106             __put_user(stfs.f_type, &target_stfs->f_type);
10107             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10108             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10109             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10110             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10111             __put_user(stfs.f_files, &target_stfs->f_files);
10112             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10113             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10114             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10115             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10116             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10117 #ifdef _STATFS_F_FLAGS
10118             __put_user(stfs.f_flags, &target_stfs->f_flags);
10119 #else
10120             __put_user(0, &target_stfs->f_flags);
10121 #endif
10122             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10123             unlock_user_struct(target_stfs, arg3, 1);
10124         }
10125         return ret;
10126     case TARGET_NR_fstatfs64:
10127         ret = get_errno(fstatfs(arg1, &stfs));
10128         goto convert_statfs64;
10129 #endif
10130 #ifdef TARGET_NR_socketcall
10131     case TARGET_NR_socketcall:
10132         return do_socketcall(arg1, arg2);
10133 #endif
10134 #ifdef TARGET_NR_accept
10135     case TARGET_NR_accept:
10136         return do_accept4(arg1, arg2, arg3, 0);
10137 #endif
10138 #ifdef TARGET_NR_accept4
10139     case TARGET_NR_accept4:
10140         return do_accept4(arg1, arg2, arg3, arg4);
10141 #endif
10142 #ifdef TARGET_NR_bind
10143     case TARGET_NR_bind:
10144         return do_bind(arg1, arg2, arg3);
10145 #endif
10146 #ifdef TARGET_NR_connect
10147     case TARGET_NR_connect:
10148         return do_connect(arg1, arg2, arg3);
10149 #endif
10150 #ifdef TARGET_NR_getpeername
10151     case TARGET_NR_getpeername:
10152         return do_getpeername(arg1, arg2, arg3);
10153 #endif
10154 #ifdef TARGET_NR_getsockname
10155     case TARGET_NR_getsockname:
10156         return do_getsockname(arg1, arg2, arg3);
10157 #endif
10158 #ifdef TARGET_NR_getsockopt
10159     case TARGET_NR_getsockopt:
10160         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10161 #endif
10162 #ifdef TARGET_NR_listen
10163     case TARGET_NR_listen:
10164         return get_errno(listen(arg1, arg2));
10165 #endif
10166 #ifdef TARGET_NR_recv
10167     case TARGET_NR_recv:
10168         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10169 #endif
10170 #ifdef TARGET_NR_recvfrom
10171     case TARGET_NR_recvfrom:
10172         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10173 #endif
10174 #ifdef TARGET_NR_recvmsg
10175     case TARGET_NR_recvmsg:
10176         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10177 #endif
10178 #ifdef TARGET_NR_send
10179     case TARGET_NR_send:
10180         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10181 #endif
10182 #ifdef TARGET_NR_sendmsg
10183     case TARGET_NR_sendmsg:
10184         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10185 #endif
10186 #ifdef TARGET_NR_sendmmsg
10187     case TARGET_NR_sendmmsg:
10188         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10189 #endif
10190 #ifdef TARGET_NR_recvmmsg
10191     case TARGET_NR_recvmmsg:
10192         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10193 #endif
10194 #ifdef TARGET_NR_sendto
10195     case TARGET_NR_sendto:
10196         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10197 #endif
10198 #ifdef TARGET_NR_shutdown
10199     case TARGET_NR_shutdown:
10200         return get_errno(shutdown(arg1, arg2));
10201 #endif
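    /*
     * getrandom needs no data conversion: the guest buffer is locked for
     * writing and unlock_user() copies back only the number of bytes the
     * host call actually produced.
     */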
10202 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10203     case TARGET_NR_getrandom:
10204         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10205         if (!p) {
10206             return -TARGET_EFAULT;
10207         }
10208         ret = get_errno(getrandom(p, arg2, arg3));
10209         unlock_user(p, arg1, ret);
10210         return ret;
10211 #endif
10212 #ifdef TARGET_NR_socket
10213     case TARGET_NR_socket:
10214         return do_socket(arg1, arg2, arg3);
10215 #endif
10216 #ifdef TARGET_NR_socketpair
10217     case TARGET_NR_socketpair:
10218         return do_socketpair(arg1, arg2, arg3, arg4);
10219 #endif
10220 #ifdef TARGET_NR_setsockopt
10221     case TARGET_NR_setsockopt:
10222         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10223 #endif
10224 #if defined(TARGET_NR_syslog)
10225     case TARGET_NR_syslog:
10226         {
10227             int len = arg3;
10228 
10229             switch (arg1) {
10230             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10231             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10232             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10233             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10234             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10235             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10236             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10237             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10238                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10239             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10240             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10241             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10242                 {
10243                     if (len < 0) {
10244                         return -TARGET_EINVAL;
10245                     }
10246                     if (len == 0) {
10247                         return 0;
10248                     }
10249                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10250                     if (!p) {
10251                         return -TARGET_EFAULT;
10252                     }
10253                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10254                     unlock_user(p, arg2, arg3);
10255                 }
10256                 return ret;
10257             default:
10258                 return -TARGET_EINVAL;
10259             }
10260         }
10261         break;
10262 #endif
10263     case TARGET_NR_setitimer:
10264         {
10265             struct itimerval value, ovalue, *pvalue;
10266 
10267             if (arg2) {
10268                 pvalue = &value;
10269                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10270                     || copy_from_user_timeval(&pvalue->it_value,
10271                                               arg2 + sizeof(struct target_timeval)))
10272                     return -TARGET_EFAULT;
10273             } else {
10274                 pvalue = NULL;
10275             }
10276             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10277             if (!is_error(ret) && arg3) {
10278                 if (copy_to_user_timeval(arg3,
10279                                          &ovalue.it_interval)
10280                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10281                                             &ovalue.it_value))
10282                     return -TARGET_EFAULT;
10283             }
10284         }
10285         return ret;
10286     case TARGET_NR_getitimer:
10287         {
10288             struct itimerval value;
10289 
10290             ret = get_errno(getitimer(arg1, &value));
10291             if (!is_error(ret) && arg2) {
10292                 if (copy_to_user_timeval(arg2,
10293                                          &value.it_interval)
10294                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10295                                             &value.it_value))
10296                     return -TARGET_EFAULT;
10297             }
10298         }
10299         return ret;
10300 #ifdef TARGET_NR_stat
10301     case TARGET_NR_stat:
10302         if (!(p = lock_user_string(arg1))) {
10303             return -TARGET_EFAULT;
10304         }
10305         ret = get_errno(stat(path(p), &st));
10306         unlock_user(p, arg1, 0);
10307         goto do_stat;
10308 #endif
10309 #ifdef TARGET_NR_lstat
10310     case TARGET_NR_lstat:
10311         if (!(p = lock_user_string(arg1))) {
10312             return -TARGET_EFAULT;
10313         }
10314         ret = get_errno(lstat(path(p), &st));
10315         unlock_user(p, arg1, 0);
10316         goto do_stat;
10317 #endif
10318 #ifdef TARGET_NR_fstat
10319     case TARGET_NR_fstat:
10320         {
10321             ret = get_errno(fstat(arg1, &st));
10322 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10323         do_stat:
10324 #endif
10325             if (!is_error(ret)) {
10326                 struct target_stat *target_st;
10327 
10328                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10329                     return -TARGET_EFAULT;
10330                 memset(target_st, 0, sizeof(*target_st));
10331                 __put_user(st.st_dev, &target_st->st_dev);
10332                 __put_user(st.st_ino, &target_st->st_ino);
10333                 __put_user(st.st_mode, &target_st->st_mode);
10334                 __put_user(st.st_uid, &target_st->st_uid);
10335                 __put_user(st.st_gid, &target_st->st_gid);
10336                 __put_user(st.st_nlink, &target_st->st_nlink);
10337                 __put_user(st.st_rdev, &target_st->st_rdev);
10338                 __put_user(st.st_size, &target_st->st_size);
10339                 __put_user(st.st_blksize, &target_st->st_blksize);
10340                 __put_user(st.st_blocks, &target_st->st_blocks);
10341                 __put_user(st.st_atime, &target_st->target_st_atime);
10342                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10343                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10344 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10345                 __put_user(st.st_atim.tv_nsec,
10346                            &target_st->target_st_atime_nsec);
10347                 __put_user(st.st_mtim.tv_nsec,
10348                            &target_st->target_st_mtime_nsec);
10349                 __put_user(st.st_ctim.tv_nsec,
10350                            &target_st->target_st_ctime_nsec);
10351 #endif
10352                 unlock_user_struct(target_st, arg2, 1);
10353             }
10354         }
10355         return ret;
10356 #endif
10357     case TARGET_NR_vhangup:
10358         return get_errno(vhangup());
10359 #ifdef TARGET_NR_syscall
10360     case TARGET_NR_syscall:
10361         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
10362                           arg6, arg7, arg8, 0);
10363 #endif
10364 #if defined(TARGET_NR_wait4)
10365     case TARGET_NR_wait4:
10366         {
10367             int status;
10368             abi_long status_ptr = arg2;
10369             struct rusage rusage, *rusage_ptr;
10370             abi_ulong target_rusage = arg4;
10371             abi_long rusage_err;
10372             if (target_rusage)
10373                 rusage_ptr = &rusage;
10374             else
10375                 rusage_ptr = NULL;
10376             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
10377             if (!is_error(ret)) {
10378                 if (status_ptr && ret) {
10379                     status = host_to_target_waitstatus(status);
10380                     if (put_user_s32(status, status_ptr))
10381                         return -TARGET_EFAULT;
10382                 }
10383                 if (target_rusage) {
10384                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
10385                     if (rusage_err) {
10386                         ret = rusage_err;
10387                     }
10388                 }
10389             }
10390         }
10391         return ret;
10392 #endif
10393 #ifdef TARGET_NR_swapoff
10394     case TARGET_NR_swapoff:
10395         if (!(p = lock_user_string(arg1)))
10396             return -TARGET_EFAULT;
10397         ret = get_errno(swapoff(p));
10398         unlock_user(p, arg1, 0);
10399         return ret;
10400 #endif
10401     case TARGET_NR_sysinfo:
10402         {
10403             struct target_sysinfo *target_value;
10404             struct sysinfo value;
10405             ret = get_errno(sysinfo(&value));
10406             if (!is_error(ret) && arg1)
10407             {
10408                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
10409                     return -TARGET_EFAULT;
10410                 __put_user(value.uptime, &target_value->uptime);
10411                 __put_user(value.loads[0], &target_value->loads[0]);
10412                 __put_user(value.loads[1], &target_value->loads[1]);
10413                 __put_user(value.loads[2], &target_value->loads[2]);
10414                 __put_user(value.totalram, &target_value->totalram);
10415                 __put_user(value.freeram, &target_value->freeram);
10416                 __put_user(value.sharedram, &target_value->sharedram);
10417                 __put_user(value.bufferram, &target_value->bufferram);
10418                 __put_user(value.totalswap, &target_value->totalswap);
10419                 __put_user(value.freeswap, &target_value->freeswap);
10420                 __put_user(value.procs, &target_value->procs);
10421                 __put_user(value.totalhigh, &target_value->totalhigh);
10422                 __put_user(value.freehigh, &target_value->freehigh);
10423                 __put_user(value.mem_unit, &target_value->mem_unit);
10424                 unlock_user_struct(target_value, arg1, 1);
10425             }
10426         }
10427         return ret;
10428 #ifdef TARGET_NR_ipc
10429     case TARGET_NR_ipc:
10430         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
10431 #endif
10432 #ifdef TARGET_NR_semget
10433     case TARGET_NR_semget:
10434         return get_errno(semget(arg1, arg2, arg3));
10435 #endif
10436 #ifdef TARGET_NR_semop
10437     case TARGET_NR_semop:
10438         return do_semtimedop(arg1, arg2, arg3, 0, false);
10439 #endif
10440 #ifdef TARGET_NR_semtimedop
10441     case TARGET_NR_semtimedop:
10442         return do_semtimedop(arg1, arg2, arg3, arg4, false);
10443 #endif
10444 #ifdef TARGET_NR_semtimedop_time64
10445     case TARGET_NR_semtimedop_time64:
10446         return do_semtimedop(arg1, arg2, arg3, arg4, true);
10447 #endif
10448 #ifdef TARGET_NR_semctl
10449     case TARGET_NR_semctl:
10450         return do_semctl(arg1, arg2, arg3, arg4);
10451 #endif
10452 #ifdef TARGET_NR_msgctl
10453     case TARGET_NR_msgctl:
10454         return do_msgctl(arg1, arg2, arg3);
10455 #endif
10456 #ifdef TARGET_NR_msgget
10457     case TARGET_NR_msgget:
10458         return get_errno(msgget(arg1, arg2));
10459 #endif
10460 #ifdef TARGET_NR_msgrcv
10461     case TARGET_NR_msgrcv:
10462         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
10463 #endif
10464 #ifdef TARGET_NR_msgsnd
10465     case TARGET_NR_msgsnd:
10466         return do_msgsnd(arg1, arg2, arg3, arg4);
10467 #endif
10468 #ifdef TARGET_NR_shmget
10469     case TARGET_NR_shmget:
10470         return get_errno(shmget(arg1, arg2, arg3));
10471 #endif
10472 #ifdef TARGET_NR_shmctl
10473     case TARGET_NR_shmctl:
10474         return do_shmctl(arg1, arg2, arg3);
10475 #endif
10476 #ifdef TARGET_NR_shmat
10477     case TARGET_NR_shmat:
10478         return do_shmat(cpu_env, arg1, arg2, arg3);
10479 #endif
10480 #ifdef TARGET_NR_shmdt
10481     case TARGET_NR_shmdt:
10482         return do_shmdt(arg1);
10483 #endif
10484     case TARGET_NR_fsync:
10485         return get_errno(fsync(arg1));
10486     case TARGET_NR_clone:
10487         /* Linux manages to have three different orderings for its
10488          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
10489          * match the kernel's CONFIG_CLONE_* settings.
10490          * Microblaze is further special in that it uses a sixth
10491          * implicit argument to clone for the TLS pointer.
10492          */
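        /*
         * For reference, assuming do_fork(env, flags, newsp, parent_tidptr,
         * tls, child_tidptr), the three explicit orderings handled below are:
         *   default:          flags, newsp, parent_tidptr, child_tidptr, tls
         *   CLONE_BACKWARDS:  flags, newsp, parent_tidptr, tls, child_tidptr
         *   CLONE_BACKWARDS2: newsp, flags, parent_tidptr, child_tidptr, tls
         */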
10493 #if defined(TARGET_MICROBLAZE)
10494         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
10495 #elif defined(TARGET_CLONE_BACKWARDS)
10496         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
10497 #elif defined(TARGET_CLONE_BACKWARDS2)
10498         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
10499 #else
10500         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
10501 #endif
10502         return ret;
10503 #ifdef __NR_exit_group
10504         /* new thread calls */
10505     case TARGET_NR_exit_group:
10506         preexit_cleanup(cpu_env, arg1);
10507         return get_errno(exit_group(arg1));
10508 #endif
10509     case TARGET_NR_setdomainname:
10510         if (!(p = lock_user_string(arg1)))
10511             return -TARGET_EFAULT;
10512         ret = get_errno(setdomainname(p, arg2));
10513         unlock_user(p, arg1, 0);
10514         return ret;
10515     case TARGET_NR_uname:
10516         /* no need to transcode because we use the linux syscall */
10517         {
10518             struct new_utsname * buf;
10519 
10520             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
10521                 return -TARGET_EFAULT;
10522             ret = get_errno(sys_uname(buf));
10523             if (!is_error(ret)) {
10524                 /* Overwrite the native machine name with whatever is being
10525                    emulated. */
10526                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
10527                           sizeof(buf->machine));
10528                 /* Allow the user to override the reported release.  */
10529                 if (qemu_uname_release && *qemu_uname_release) {
10530                     g_strlcpy(buf->release, qemu_uname_release,
10531                               sizeof(buf->release));
10532                 }
10533             }
10534             unlock_user_struct(buf, arg1, 1);
10535         }
10536         return ret;
10537 #ifdef TARGET_I386
10538     case TARGET_NR_modify_ldt:
10539         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
10540 #if !defined(TARGET_X86_64)
10541     case TARGET_NR_vm86:
10542         return do_vm86(cpu_env, arg1, arg2);
10543 #endif
10544 #endif
10545 #if defined(TARGET_NR_adjtimex)
10546     case TARGET_NR_adjtimex:
10547         {
10548             struct timex host_buf;
10549 
10550             if (target_to_host_timex(&host_buf, arg1) != 0) {
10551                 return -TARGET_EFAULT;
10552             }
10553             ret = get_errno(adjtimex(&host_buf));
10554             if (!is_error(ret)) {
10555                 if (host_to_target_timex(arg1, &host_buf) != 0) {
10556                     return -TARGET_EFAULT;
10557                 }
10558             }
10559         }
10560         return ret;
10561 #endif
10562 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
10563     case TARGET_NR_clock_adjtime:
10564         {
10565             struct timex htx, *phtx = &htx;
10566 
10567             if (target_to_host_timex(phtx, arg2) != 0) {
10568                 return -TARGET_EFAULT;
10569             }
10570             ret = get_errno(clock_adjtime(arg1, phtx));
10571             if (!is_error(ret) && phtx) {
10572                 if (host_to_target_timex(arg2, phtx) != 0) {
10573                     return -TARGET_EFAULT;
10574                 }
10575             }
10576         }
10577         return ret;
10578 #endif
10579 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
10580     case TARGET_NR_clock_adjtime64:
10581         {
10582             struct timex htx;
10583 
10584             if (target_to_host_timex64(&htx, arg2) != 0) {
10585                 return -TARGET_EFAULT;
10586             }
10587             ret = get_errno(clock_adjtime(arg1, &htx));
10588             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
10589                 return -TARGET_EFAULT;
10590             }
10591         }
10592         return ret;
10593 #endif
10594     case TARGET_NR_getpgid:
10595         return get_errno(getpgid(arg1));
10596     case TARGET_NR_fchdir:
10597         return get_errno(fchdir(arg1));
10598     case TARGET_NR_personality:
10599         return get_errno(personality(arg1));
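    /*
     * _llseek composes its 64-bit offset from two 32-bit halves (arg2:arg3)
     * and writes the result through the pointer in arg4.  On hosts without
     * __NR_llseek (64-bit hosts) a plain lseek() does the job directly.
     */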
10600 #ifdef TARGET_NR__llseek /* Not on alpha */
10601     case TARGET_NR__llseek:
10602         {
10603             int64_t res;
10604 #if !defined(__NR_llseek)
10605             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
10606             if (res == -1) {
10607                 ret = get_errno(res);
10608             } else {
10609                 ret = 0;
10610             }
10611 #else
10612             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
10613 #endif
10614             if ((ret == 0) && put_user_s64(res, arg4)) {
10615                 return -TARGET_EFAULT;
10616             }
10617         }
10618         return ret;
10619 #endif
10620 #ifdef TARGET_NR_getdents
10621     case TARGET_NR_getdents:
10622         return do_getdents(arg1, arg2, arg3);
10623 #endif /* TARGET_NR_getdents */
10624 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
10625     case TARGET_NR_getdents64:
10626         return do_getdents64(arg1, arg2, arg3);
10627 #endif /* TARGET_NR_getdents64 */
10628 #if defined(TARGET_NR__newselect)
10629     case TARGET_NR__newselect:
10630         return do_select(arg1, arg2, arg3, arg4, arg5);
10631 #endif
10632 #ifdef TARGET_NR_poll
10633     case TARGET_NR_poll:
10634         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
10635 #endif
10636 #ifdef TARGET_NR_ppoll
10637     case TARGET_NR_ppoll:
10638         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
10639 #endif
10640 #ifdef TARGET_NR_ppoll_time64
10641     case TARGET_NR_ppoll_time64:
10642         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
10643 #endif
10644     case TARGET_NR_flock:
10645         /* NOTE: the flock constants (LOCK_SH/EX/UN/NB) are the same on
10646            every Linux platform, so the argument needs no translation */
10647         return get_errno(safe_flock(arg1, arg2));
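    /*
     * For the vectored I/O calls, lock_iovec() copies in the guest iovec
     * array and maps each base pointer to host memory; the last argument to
     * unlock_iovec() says whether the buffers were written to and so need
     * copying back to the guest.
     */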
10648     case TARGET_NR_readv:
10649         {
10650             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10651             if (vec != NULL) {
10652                 ret = get_errno(safe_readv(arg1, vec, arg3));
10653                 unlock_iovec(vec, arg2, arg3, 1);
10654             } else {
10655                 ret = -host_to_target_errno(errno);
10656             }
10657         }
10658         return ret;
10659     case TARGET_NR_writev:
10660         {
10661             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10662             if (vec != NULL) {
10663                 ret = get_errno(safe_writev(arg1, vec, arg3));
10664                 unlock_iovec(vec, arg2, arg3, 0);
10665             } else {
10666                 ret = -host_to_target_errno(errno);
10667             }
10668         }
10669         return ret;
10670 #if defined(TARGET_NR_preadv)
10671     case TARGET_NR_preadv:
10672         {
10673             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
10674             if (vec != NULL) {
10675                 unsigned long low, high;
10676 
10677                 target_to_host_low_high(arg4, arg5, &low, &high);
10678                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
10679                 unlock_iovec(vec, arg2, arg3, 1);
10680             } else {
10681                 ret = -host_to_target_errno(errno);
            }
10683         }
10684         return ret;
10685 #endif
10686 #if defined(TARGET_NR_pwritev)
10687     case TARGET_NR_pwritev:
10688         {
10689             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
10690             if (vec != NULL) {
10691                 unsigned long low, high;
10692 
10693                 target_to_host_low_high(arg4, arg5, &low, &high);
10694                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
10695                 unlock_iovec(vec, arg2, arg3, 0);
10696             } else {
10697                 ret = -host_to_target_errno(errno);
            }
10699         }
10700         return ret;
10701 #endif
10702     case TARGET_NR_getsid:
10703         return get_errno(getsid(arg1));
10704 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
10705     case TARGET_NR_fdatasync:
10706         return get_errno(fdatasync(arg1));
10707 #endif
10708     case TARGET_NR_sched_getaffinity:
10709         {
10710             unsigned int mask_size;
10711             unsigned long *mask;
10712 
10713             /*
10714              * sched_getaffinity needs multiples of ulong, so need to take
10715              * care of mismatches between target ulong and host ulong sizes.
10716              */
10717             if (arg2 & (sizeof(abi_ulong) - 1)) {
10718                 return -TARGET_EINVAL;
10719             }
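            /* Round the buffer size up to a whole number of host longs. */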
10720             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10721 
10722             mask = alloca(mask_size);
10723             memset(mask, 0, mask_size);
10724             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
10725 
10726             if (!is_error(ret)) {
10727                 if (ret > arg2) {
10728                     /* More data returned than the caller's buffer will fit.
10729                      * This only happens if sizeof(abi_long) < sizeof(long)
10730                      * and the caller passed us a buffer holding an odd number
10731                      * of abi_longs. If the host kernel is actually using the
10732                      * extra 4 bytes then fail EINVAL; otherwise we can just
10733                      * ignore them and only copy the interesting part.
10734                      */
10735                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
10736                     if (numcpus > arg2 * 8) {
10737                         return -TARGET_EINVAL;
10738                     }
10739                     ret = arg2;
10740                 }
10741 
10742                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
10743                     return -TARGET_EFAULT;
10744                 }
10745             }
10746         }
10747         return ret;
10748     case TARGET_NR_sched_setaffinity:
10749         {
10750             unsigned int mask_size;
10751             unsigned long *mask;
10752 
10753             /*
10754              * sched_setaffinity needs multiples of ulong, so need to take
10755              * care of mismatches between target ulong and host ulong sizes.
10756              */
10757             if (arg2 & (sizeof(abi_ulong) - 1)) {
10758                 return -TARGET_EINVAL;
10759             }
10760             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
10761             mask = alloca(mask_size);
10762 
10763             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
10764             if (ret) {
10765                 return ret;
10766             }
10767 
10768             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
10769         }
10770     case TARGET_NR_getcpu:
10771         {
10772             unsigned cpu, node;
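            /*
             * The third (tcache) argument has been ignored by the kernel
             * since Linux 2.6.24, so always pass NULL.
             */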
10773             ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
10774                                        arg2 ? &node : NULL,
10775                                        NULL));
10776             if (is_error(ret)) {
10777                 return ret;
10778             }
10779             if (arg1 && put_user_u32(cpu, arg1)) {
10780                 return -TARGET_EFAULT;
10781             }
10782             if (arg2 && put_user_u32(node, arg2)) {
10783                 return -TARGET_EFAULT;
10784             }
10785         }
10786         return ret;
10787     case TARGET_NR_sched_setparam:
10788         {
10789             struct target_sched_param *target_schp;
10790             struct sched_param schp;
10791 
10792             if (arg2 == 0) {
10793                 return -TARGET_EINVAL;
10794             }
10795             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
10796                 return -TARGET_EFAULT;
10797             }
10798             schp.sched_priority = tswap32(target_schp->sched_priority);
10799             unlock_user_struct(target_schp, arg2, 0);
10800             return get_errno(sys_sched_setparam(arg1, &schp));
10801         }
10802     case TARGET_NR_sched_getparam:
10803         {
10804             struct target_sched_param *target_schp;
10805             struct sched_param schp;
10806 
10807             if (arg2 == 0) {
10808                 return -TARGET_EINVAL;
10809             }
10810             ret = get_errno(sys_sched_getparam(arg1, &schp));
10811             if (!is_error(ret)) {
10812                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
10813                     return -TARGET_EFAULT;
10814                 }
10815                 target_schp->sched_priority = tswap32(schp.sched_priority);
10816                 unlock_user_struct(target_schp, arg2, 1);
10817             }
10818         }
10819         return ret;
10820     case TARGET_NR_sched_setscheduler:
10821         {
10822             struct target_sched_param *target_schp;
10823             struct sched_param schp;
10824             if (arg3 == 0) {
10825                 return -TARGET_EINVAL;
10826             }
10827             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
10828                 return -TARGET_EFAULT;
10829             }
10830             schp.sched_priority = tswap32(target_schp->sched_priority);
10831             unlock_user_struct(target_schp, arg3, 0);
10832             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
10833         }
10834     case TARGET_NR_sched_getscheduler:
10835         return get_errno(sys_sched_getscheduler(arg1));
10836     case TARGET_NR_sched_getattr:
10837         {
10838             struct target_sched_attr *target_scha;
10839             struct sched_attr scha;
10840             if (arg2 == 0) {
10841                 return -TARGET_EINVAL;
10842             }
10843             if (arg3 > sizeof(scha)) {
10844                 arg3 = sizeof(scha);
10845             }
10846             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
10847             if (!is_error(ret)) {
10848                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10849                 if (!target_scha) {
10850                     return -TARGET_EFAULT;
10851                 }
10852                 target_scha->size = tswap32(scha.size);
10853                 target_scha->sched_policy = tswap32(scha.sched_policy);
10854                 target_scha->sched_flags = tswap64(scha.sched_flags);
10855                 target_scha->sched_nice = tswap32(scha.sched_nice);
10856                 target_scha->sched_priority = tswap32(scha.sched_priority);
10857                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
10858                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
10859                 target_scha->sched_period = tswap64(scha.sched_period);
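                /* Only copy the utilization clamps if the kernel provided them. */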
10860                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
10861                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
10862                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
10863                 }
10864                 unlock_user(target_scha, arg2, arg3);
10865             }
10866             return ret;
10867         }
10868     case TARGET_NR_sched_setattr:
10869         {
10870             struct target_sched_attr *target_scha;
10871             struct sched_attr scha;
10872             uint32_t size;
10873             int zeroed;
10874             if (arg2 == 0) {
10875                 return -TARGET_EINVAL;
10876             }
10877             if (get_user_u32(size, arg2)) {
10878                 return -TARGET_EFAULT;
10879             }
10880             if (!size) {
10881                 size = offsetof(struct target_sched_attr, sched_util_min);
10882             }
10883             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
10884                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10885                     return -TARGET_EFAULT;
10886                 }
10887                 return -TARGET_E2BIG;
10888             }
10889 
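            /*
             * Any bytes beyond the layout we know about must be zero;
             * otherwise report the size we support and fail with E2BIG,
             * mirroring the kernel's behaviour.
             */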
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr),
                                       size);
10891             if (zeroed < 0) {
10892                 return zeroed;
10893             } else if (zeroed == 0) {
10894                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
10895                     return -TARGET_EFAULT;
10896                 }
10897                 return -TARGET_E2BIG;
10898             }
10899             if (size > sizeof(struct target_sched_attr)) {
10900                 size = sizeof(struct target_sched_attr);
10901             }
10902 
10903             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
10904             if (!target_scha) {
10905                 return -TARGET_EFAULT;
10906             }
10907             scha.size = size;
10908             scha.sched_policy = tswap32(target_scha->sched_policy);
10909             scha.sched_flags = tswap64(target_scha->sched_flags);
10910             scha.sched_nice = tswap32(target_scha->sched_nice);
10911             scha.sched_priority = tswap32(target_scha->sched_priority);
10912             scha.sched_runtime = tswap64(target_scha->sched_runtime);
10913             scha.sched_deadline = tswap64(target_scha->sched_deadline);
10914             scha.sched_period = tswap64(target_scha->sched_period);
10915             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
10916                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
10917                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
10918             }
10919             unlock_user(target_scha, arg2, 0);
10920             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
10921         }
10922     case TARGET_NR_sched_yield:
10923         return get_errno(sched_yield());
10924     case TARGET_NR_sched_get_priority_max:
10925         return get_errno(sched_get_priority_max(arg1));
10926     case TARGET_NR_sched_get_priority_min:
10927         return get_errno(sched_get_priority_min(arg1));
10928 #ifdef TARGET_NR_sched_rr_get_interval
10929     case TARGET_NR_sched_rr_get_interval:
10930         {
10931             struct timespec ts;
10932             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10933             if (!is_error(ret)) {
10934                 ret = host_to_target_timespec(arg2, &ts);
10935             }
10936         }
10937         return ret;
10938 #endif
10939 #ifdef TARGET_NR_sched_rr_get_interval_time64
10940     case TARGET_NR_sched_rr_get_interval_time64:
10941         {
10942             struct timespec ts;
10943             ret = get_errno(sched_rr_get_interval(arg1, &ts));
10944             if (!is_error(ret)) {
10945                 ret = host_to_target_timespec64(arg2, &ts);
10946             }
10947         }
10948         return ret;
10949 #endif
10950 #if defined(TARGET_NR_nanosleep)
10951     case TARGET_NR_nanosleep:
10952         {
            struct timespec req, rem;
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2 && host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
10959         }
10960         return ret;
10961 #endif
10962     case TARGET_NR_prctl:
10963         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
10965 #ifdef TARGET_NR_arch_prctl
10966     case TARGET_NR_arch_prctl:
10967         return do_arch_prctl(cpu_env, arg1, arg2);
10968 #endif
10969 #ifdef TARGET_NR_pread64
10970     case TARGET_NR_pread64:
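        /*
         * Some 32-bit ABIs pass 64-bit values in aligned register pairs,
         * which inserts a padding argument before the offset; drop it so
         * that (arg4, arg5) hold the offset halves.
         */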
10971         if (regpairs_aligned(cpu_env, num)) {
10972             arg4 = arg5;
10973             arg5 = arg6;
10974         }
10975         if (arg2 == 0 && arg3 == 0) {
10976             /* Special-case NULL buffer and zero length, which should succeed */
10977             p = 0;
10978         } else {
10979             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10980             if (!p) {
10981                 return -TARGET_EFAULT;
10982             }
10983         }
10984         ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
10985         unlock_user(p, arg2, ret);
10986         return ret;
10987     case TARGET_NR_pwrite64:
10988         if (regpairs_aligned(cpu_env, num)) {
10989             arg4 = arg5;
10990             arg5 = arg6;
10991         }
10992         if (arg2 == 0 && arg3 == 0) {
10993             /* Special-case NULL buffer and zero length, which should succeed */
10994             p = 0;
10995         } else {
10996             p = lock_user(VERIFY_READ, arg2, arg3, 1);
10997             if (!p) {
10998                 return -TARGET_EFAULT;
10999             }
11000         }
11001         ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
11002         unlock_user(p, arg2, 0);
11003         return ret;
11004 #endif
11005     case TARGET_NR_getcwd:
11006         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11007             return -TARGET_EFAULT;
11008         ret = get_errno(sys_getcwd1(p, arg2));
11009         unlock_user(p, arg1, ret);
11010         return ret;
11011     case TARGET_NR_capget:
11012     case TARGET_NR_capset:
11013     {
11014         struct target_user_cap_header *target_header;
11015         struct target_user_cap_data *target_data = NULL;
11016         struct __user_cap_header_struct header;
11017         struct __user_cap_data_struct data[2];
11018         struct __user_cap_data_struct *dataptr = NULL;
11019         int i, target_datalen;
11020         int data_items = 1;
11021 
11022         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11023             return -TARGET_EFAULT;
11024         }
11025         header.version = tswap32(target_header->version);
11026         header.pid = tswap32(target_header->pid);
11027 
11028         if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Versions 2 and up take a pointer to two user_data structs. */
11030             data_items = 2;
11031         }
11032 
11033         target_datalen = sizeof(*target_data) * data_items;
11034 
11035         if (arg2) {
11036             if (num == TARGET_NR_capget) {
11037                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11038             } else {
11039                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11040             }
11041             if (!target_data) {
11042                 unlock_user_struct(target_header, arg1, 0);
11043                 return -TARGET_EFAULT;
11044             }
11045 
11046             if (num == TARGET_NR_capset) {
11047                 for (i = 0; i < data_items; i++) {
11048                     data[i].effective = tswap32(target_data[i].effective);
11049                     data[i].permitted = tswap32(target_data[i].permitted);
11050                     data[i].inheritable = tswap32(target_data[i].inheritable);
11051                 }
11052             }
11053 
11054             dataptr = data;
11055         }
11056 
11057         if (num == TARGET_NR_capget) {
11058             ret = get_errno(capget(&header, dataptr));
11059         } else {
11060             ret = get_errno(capset(&header, dataptr));
11061         }
11062 
11063         /* The kernel always updates version for both capget and capset */
11064         target_header->version = tswap32(header.version);
11065         unlock_user_struct(target_header, arg1, 1);
11066 
11067         if (arg2) {
11068             if (num == TARGET_NR_capget) {
11069                 for (i = 0; i < data_items; i++) {
11070                     target_data[i].effective = tswap32(data[i].effective);
11071                     target_data[i].permitted = tswap32(data[i].permitted);
11072                     target_data[i].inheritable = tswap32(data[i].inheritable);
11073                 }
11074                 unlock_user(target_data, arg2, target_datalen);
11075             } else {
11076                 unlock_user(target_data, arg2, 0);
11077             }
11078         }
11079         return ret;
11080     }
11081     case TARGET_NR_sigaltstack:
11082         return do_sigaltstack(arg1, arg2, cpu_env);
11083 
11084 #ifdef CONFIG_SENDFILE
11085 #ifdef TARGET_NR_sendfile
11086     case TARGET_NR_sendfile:
11087     {
11088         off_t *offp = NULL;
11089         off_t off;
11090         if (arg3) {
11091             ret = get_user_sal(off, arg3);
11092             if (is_error(ret)) {
11093                 return ret;
11094             }
11095             offp = &off;
11096         }
11097         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11098         if (!is_error(ret) && arg3) {
11099             abi_long ret2 = put_user_sal(off, arg3);
11100             if (is_error(ret2)) {
11101                 ret = ret2;
11102             }
11103         }
11104         return ret;
11105     }
11106 #endif
11107 #ifdef TARGET_NR_sendfile64
11108     case TARGET_NR_sendfile64:
11109     {
11110         off_t *offp = NULL;
11111         off_t off;
11112         if (arg3) {
11113             ret = get_user_s64(off, arg3);
11114             if (is_error(ret)) {
11115                 return ret;
11116             }
11117             offp = &off;
11118         }
11119         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11120         if (!is_error(ret) && arg3) {
11121             abi_long ret2 = put_user_s64(off, arg3);
11122             if (is_error(ret2)) {
11123                 ret = ret2;
11124             }
11125         }
11126         return ret;
11127     }
11128 #endif
11129 #endif
11130 #ifdef TARGET_NR_vfork
11131     case TARGET_NR_vfork:
11132         return get_errno(do_fork(cpu_env,
11133                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11134                          0, 0, 0, 0));
11135 #endif
11136 #ifdef TARGET_NR_ugetrlimit
11137     case TARGET_NR_ugetrlimit:
11138     {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
11150         return ret;
11151     }
11152 #endif
11153 #ifdef TARGET_NR_truncate64
11154     case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11158         unlock_user(p, arg1, 0);
11159         return ret;
11160 #endif
11161 #ifdef TARGET_NR_ftruncate64
11162     case TARGET_NR_ftruncate64:
11163         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11164 #endif
11165 #ifdef TARGET_NR_stat64
11166     case TARGET_NR_stat64:
11167         if (!(p = lock_user_string(arg1))) {
11168             return -TARGET_EFAULT;
11169         }
11170         ret = get_errno(stat(path(p), &st));
11171         unlock_user(p, arg1, 0);
11172         if (!is_error(ret))
11173             ret = host_to_target_stat64(cpu_env, arg2, &st);
11174         return ret;
11175 #endif
11176 #ifdef TARGET_NR_lstat64
11177     case TARGET_NR_lstat64:
11178         if (!(p = lock_user_string(arg1))) {
11179             return -TARGET_EFAULT;
11180         }
11181         ret = get_errno(lstat(path(p), &st));
11182         unlock_user(p, arg1, 0);
11183         if (!is_error(ret))
11184             ret = host_to_target_stat64(cpu_env, arg2, &st);
11185         return ret;
11186 #endif
11187 #ifdef TARGET_NR_fstat64
11188     case TARGET_NR_fstat64:
11189         ret = get_errno(fstat(arg1, &st));
11190         if (!is_error(ret))
11191             ret = host_to_target_stat64(cpu_env, arg2, &st);
11192         return ret;
11193 #endif
11194 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11195 #ifdef TARGET_NR_fstatat64
11196     case TARGET_NR_fstatat64:
11197 #endif
11198 #ifdef TARGET_NR_newfstatat
11199     case TARGET_NR_newfstatat:
11200 #endif
11201         if (!(p = lock_user_string(arg2))) {
11202             return -TARGET_EFAULT;
11203         }
11204         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11205         unlock_user(p, arg2, 0);
11206         if (!is_error(ret))
11207             ret = host_to_target_stat64(cpu_env, arg3, &st);
11208         return ret;
11209 #endif
11210 #if defined(TARGET_NR_statx)
11211     case TARGET_NR_statx:
11212         {
11213             struct target_statx *target_stx;
11214             int dirfd = arg1;
11215             int flags = arg3;
11216 
11217             p = lock_user_string(arg2);
11218             if (p == NULL) {
11219                 return -TARGET_EFAULT;
11220             }
11221 #if defined(__NR_statx)
11222             {
11223                 /*
11224                  * It is assumed that struct statx is architecture independent.
11225                  */
11226                 struct target_statx host_stx;
11227                 int mask = arg4;
11228 
11229                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11230                 if (!is_error(ret)) {
11231                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11232                         unlock_user(p, arg2, 0);
11233                         return -TARGET_EFAULT;
11234                     }
11235                 }
11236 
11237                 if (ret != -TARGET_ENOSYS) {
11238                     unlock_user(p, arg2, 0);
11239                     return ret;
11240                 }
11241             }
11242 #endif
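            /*
             * Either the host lacks a statx syscall or it returned ENOSYS:
             * fall back to fstatat() and fill in the statx fields that
             * struct stat can provide.
             */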
11243             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11244             unlock_user(p, arg2, 0);
11245 
11246             if (!is_error(ret)) {
11247                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11248                     return -TARGET_EFAULT;
11249                 }
11250                 memset(target_stx, 0, sizeof(*target_stx));
11251                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11252                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11253                 __put_user(st.st_ino, &target_stx->stx_ino);
11254                 __put_user(st.st_mode, &target_stx->stx_mode);
11255                 __put_user(st.st_uid, &target_stx->stx_uid);
11256                 __put_user(st.st_gid, &target_stx->stx_gid);
11257                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11258                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11259                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11260                 __put_user(st.st_size, &target_stx->stx_size);
11261                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11262                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11263                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11264                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11265                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11266                 unlock_user_struct(target_stx, arg5, 1);
11267             }
11268         }
11269         return ret;
11270 #endif
11271 #ifdef TARGET_NR_lchown
11272     case TARGET_NR_lchown:
11273         if (!(p = lock_user_string(arg1)))
11274             return -TARGET_EFAULT;
11275         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11276         unlock_user(p, arg1, 0);
11277         return ret;
11278 #endif
11279 #ifdef TARGET_NR_getuid
11280     case TARGET_NR_getuid:
11281         return get_errno(high2lowuid(getuid()));
11282 #endif
11283 #ifdef TARGET_NR_getgid
11284     case TARGET_NR_getgid:
11285         return get_errno(high2lowgid(getgid()));
11286 #endif
11287 #ifdef TARGET_NR_geteuid
11288     case TARGET_NR_geteuid:
11289         return get_errno(high2lowuid(geteuid()));
11290 #endif
11291 #ifdef TARGET_NR_getegid
11292     case TARGET_NR_getegid:
11293         return get_errno(high2lowgid(getegid()));
11294 #endif
11295     case TARGET_NR_setreuid:
11296         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11297     case TARGET_NR_setregid:
11298         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11299     case TARGET_NR_getgroups:
11300         {
11301             int gidsetsize = arg1;
11302             target_id *target_grouplist;
11303             gid_t *grouplist;
11304             int i;
11305 
11306             grouplist = alloca(gidsetsize * sizeof(gid_t));
11307             ret = get_errno(getgroups(gidsetsize, grouplist));
11308             if (gidsetsize == 0)
11309                 return ret;
11310             if (!is_error(ret)) {
11311                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
11312                 if (!target_grouplist)
11313                     return -TARGET_EFAULT;
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
11316                 unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
11317             }
11318         }
11319         return ret;
11320     case TARGET_NR_setgroups:
11321         {
11322             int gidsetsize = arg1;
11323             target_id *target_grouplist;
11324             gid_t *grouplist = NULL;
11325             int i;
11326             if (gidsetsize) {
11327                 grouplist = alloca(gidsetsize * sizeof(gid_t));
11328                 target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
11329                 if (!target_grouplist) {
11330                     return -TARGET_EFAULT;
11331                 }
11332                 for (i = 0; i < gidsetsize; i++) {
11333                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11334                 }
11335                 unlock_user(target_grouplist, arg2, 0);
11336             }
11337             return get_errno(setgroups(gidsetsize, grouplist));
11338         }
11339     case TARGET_NR_fchown:
11340         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11341 #if defined(TARGET_NR_fchownat)
11342     case TARGET_NR_fchownat:
11343         if (!(p = lock_user_string(arg2)))
11344             return -TARGET_EFAULT;
11345         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
11346                                  low2highgid(arg4), arg5));
11347         unlock_user(p, arg2, 0);
11348         return ret;
11349 #endif
11350 #ifdef TARGET_NR_setresuid
11351     case TARGET_NR_setresuid:
11352         return get_errno(sys_setresuid(low2highuid(arg1),
11353                                        low2highuid(arg2),
11354                                        low2highuid(arg3)));
11355 #endif
11356 #ifdef TARGET_NR_getresuid
11357     case TARGET_NR_getresuid:
11358         {
11359             uid_t ruid, euid, suid;
11360             ret = get_errno(getresuid(&ruid, &euid, &suid));
11361             if (!is_error(ret)) {
11362                 if (put_user_id(high2lowuid(ruid), arg1)
11363                     || put_user_id(high2lowuid(euid), arg2)
11364                     || put_user_id(high2lowuid(suid), arg3))
11365                     return -TARGET_EFAULT;
11366             }
11367         }
11368         return ret;
11369 #endif
11370 #ifdef TARGET_NR_getresgid
11371     case TARGET_NR_setresgid:
11372         return get_errno(sys_setresgid(low2highgid(arg1),
11373                                        low2highgid(arg2),
11374                                        low2highgid(arg3)));
11375 #endif
11376 #ifdef TARGET_NR_getresgid
11377     case TARGET_NR_getresgid:
11378         {
11379             gid_t rgid, egid, sgid;
11380             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11381             if (!is_error(ret)) {
11382                 if (put_user_id(high2lowgid(rgid), arg1)
11383                     || put_user_id(high2lowgid(egid), arg2)
11384                     || put_user_id(high2lowgid(sgid), arg3))
11385                     return -TARGET_EFAULT;
11386             }
11387         }
11388         return ret;
11389 #endif
11390 #ifdef TARGET_NR_chown
11391     case TARGET_NR_chown:
11392         if (!(p = lock_user_string(arg1)))
11393             return -TARGET_EFAULT;
11394         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
11395         unlock_user(p, arg1, 0);
11396         return ret;
11397 #endif
11398     case TARGET_NR_setuid:
11399         return get_errno(sys_setuid(low2highuid(arg1)));
11400     case TARGET_NR_setgid:
11401         return get_errno(sys_setgid(low2highgid(arg1)));
11402     case TARGET_NR_setfsuid:
11403         return get_errno(setfsuid(arg1));
11404     case TARGET_NR_setfsgid:
11405         return get_errno(setfsgid(arg1));
11406 
11407 #ifdef TARGET_NR_lchown32
11408     case TARGET_NR_lchown32:
11409         if (!(p = lock_user_string(arg1)))
11410             return -TARGET_EFAULT;
11411         ret = get_errno(lchown(p, arg2, arg3));
11412         unlock_user(p, arg1, 0);
11413         return ret;
11414 #endif
11415 #ifdef TARGET_NR_getuid32
11416     case TARGET_NR_getuid32:
11417         return get_errno(getuid());
11418 #endif
11419 
11420 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid = geteuid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
        }
11428         return get_errno(getuid());
11429 #endif
11430 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            gid_t egid;
            egid = getegid();
            ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
        }
11438         return get_errno(getgid());
11439 #endif
11440 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
11441     /* Alpha specific */
11442     case TARGET_NR_osf_getsysinfo:
11443         ret = -TARGET_EOPNOTSUPP;
11444         switch (arg1) {
11445           case TARGET_GSI_IEEE_FP_CONTROL:
11446             {
11447                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
11448                 uint64_t swcr = ((CPUAlphaState *)cpu_env)->swcr;
11449 
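                /*
                 * The exception status bits live in the hardware fpcr;
                 * merge them into the swcr value we report.
                 */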
11450                 swcr &= ~SWCR_STATUS_MASK;
11451                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
11452 
11453                 if (put_user_u64 (swcr, arg2))
11454                         return -TARGET_EFAULT;
11455                 ret = 0;
11456             }
11457             break;
11458 
11459           /* case GSI_IEEE_STATE_AT_SIGNAL:
11460              -- Not implemented in linux kernel.
11461              case GSI_UACPROC:
11462              -- Retrieves current unaligned access state; not much used.
11463              case GSI_PROC_TYPE:
11464              -- Retrieves implver information; surely not used.
11465              case GSI_GET_HWRPB:
11466              -- Grabs a copy of the HWRPB; surely not used.
11467           */
11468         }
11469         return ret;
11470 #endif
11471 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
11472     /* Alpha specific */
11473     case TARGET_NR_osf_setsysinfo:
11474         ret = -TARGET_EOPNOTSUPP;
11475         switch (arg1) {
11476           case TARGET_SSI_IEEE_FP_CONTROL:
11477             {
11478                 uint64_t swcr, fpcr;
11479 
11480                 if (get_user_u64 (swcr, arg2)) {
11481                     return -TARGET_EFAULT;
11482                 }
11483 
11484                 /*
11485                  * The kernel calls swcr_update_status to update the
11486                  * status bits from the fpcr at every point that it
11487                  * could be queried.  Therefore, we store the status
11488                  * bits only in FPCR.
11489                  */
11490                 ((CPUAlphaState *)cpu_env)->swcr
11491                     = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
11492 
11493                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11494                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
11495                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
11496                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11497                 ret = 0;
11498             }
11499             break;
11500 
11501           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
11502             {
11503                 uint64_t exc, fpcr, fex;
11504 
11505                 if (get_user_u64(exc, arg2)) {
11506                     return -TARGET_EFAULT;
11507                 }
11508                 exc &= SWCR_STATUS_MASK;
11509                 fpcr = cpu_alpha_load_fpcr(cpu_env);
11510 
11511                 /* Old exceptions are not signaled.  */
11512                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
11513                 fex = exc & ~fex;
11514                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
11515                 fex &= ((CPUArchState *)cpu_env)->swcr;
11516 
11517                 /* Update the hardware fpcr.  */
11518                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
11519                 cpu_alpha_store_fpcr(cpu_env, fpcr);
11520 
11521                 if (fex) {
11522                     int si_code = TARGET_FPE_FLTUNK;
11523                     target_siginfo_t info;
11524 
11525                     if (fex & SWCR_TRAP_ENABLE_DNO) {
11526                         si_code = TARGET_FPE_FLTUND;
11527                     }
11528                     if (fex & SWCR_TRAP_ENABLE_INE) {
11529                         si_code = TARGET_FPE_FLTRES;
11530                     }
11531                     if (fex & SWCR_TRAP_ENABLE_UNF) {
11532                         si_code = TARGET_FPE_FLTUND;
11533                     }
11534                     if (fex & SWCR_TRAP_ENABLE_OVF) {
11535                         si_code = TARGET_FPE_FLTOVF;
11536                     }
11537                     if (fex & SWCR_TRAP_ENABLE_DZE) {
11538                         si_code = TARGET_FPE_FLTDIV;
11539                     }
11540                     if (fex & SWCR_TRAP_ENABLE_INV) {
11541                         si_code = TARGET_FPE_FLTINV;
11542                     }
11543 
11544                     info.si_signo = SIGFPE;
11545                     info.si_errno = 0;
11546                     info.si_code = si_code;
11547                     info._sifields._sigfault._addr
11548                         = ((CPUArchState *)cpu_env)->pc;
11549                     queue_signal((CPUArchState *)cpu_env, info.si_signo,
11550                                  QEMU_SI_FAULT, &info);
11551                 }
11552                 ret = 0;
11553             }
11554             break;
11555 
11556           /* case SSI_NVPAIRS:
11557              -- Used with SSIN_UACPROC to enable unaligned accesses.
11558              case SSI_IEEE_STATE_AT_SIGNAL:
11559              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
11560              -- Not implemented in linux kernel
11561           */
11562         }
11563         return ret;
11564 #endif
11565 #ifdef TARGET_NR_osf_sigprocmask
11566     /* Alpha specific.  */
11567     case TARGET_NR_osf_sigprocmask:
11568         {
11569             abi_ulong mask;
11570             int how;
11571             sigset_t set, oldset;
11572 
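            /*
             * Unlike sigprocmask(2), the OSF variant takes the new mask by
             * value in arg2 and returns the old mask as the result.
             */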
            switch (arg1) {
11574             case TARGET_SIG_BLOCK:
11575                 how = SIG_BLOCK;
11576                 break;
11577             case TARGET_SIG_UNBLOCK:
11578                 how = SIG_UNBLOCK;
11579                 break;
11580             case TARGET_SIG_SETMASK:
11581                 how = SIG_SETMASK;
11582                 break;
11583             default:
11584                 return -TARGET_EINVAL;
11585             }
11586             mask = arg2;
11587             target_to_host_old_sigset(&set, &mask);
11588             ret = do_sigprocmask(how, &set, &oldset);
11589             if (!ret) {
11590                 host_to_target_old_sigset(&mask, &oldset);
11591                 ret = mask;
11592             }
11593         }
11594         return ret;
11595 #endif
11596 
11597 #ifdef TARGET_NR_getgid32
11598     case TARGET_NR_getgid32:
11599         return get_errno(getgid());
11600 #endif
11601 #ifdef TARGET_NR_geteuid32
11602     case TARGET_NR_geteuid32:
11603         return get_errno(geteuid());
11604 #endif
11605 #ifdef TARGET_NR_getegid32
11606     case TARGET_NR_getegid32:
11607         return get_errno(getegid());
11608 #endif
11609 #ifdef TARGET_NR_setreuid32
11610     case TARGET_NR_setreuid32:
11611         return get_errno(setreuid(arg1, arg2));
11612 #endif
11613 #ifdef TARGET_NR_setregid32
11614     case TARGET_NR_setregid32:
11615         return get_errno(setregid(arg1, arg2));
11616 #endif
11617 #ifdef TARGET_NR_getgroups32
11618     case TARGET_NR_getgroups32:
11619         {
11620             int gidsetsize = arg1;
11621             uint32_t *target_grouplist;
11622             gid_t *grouplist;
11623             int i;
11624 
11625             grouplist = alloca(gidsetsize * sizeof(gid_t));
11626             ret = get_errno(getgroups(gidsetsize, grouplist));
11627             if (gidsetsize == 0)
11628                 return ret;
11629             if (!is_error(ret)) {
11630                 target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
11631                 if (!target_grouplist) {
11632                     return -TARGET_EFAULT;
11633                 }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
11636                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
11637             }
11638         }
11639         return ret;
11640 #endif
11641 #ifdef TARGET_NR_setgroups32
11642     case TARGET_NR_setgroups32:
11643         {
11644             int gidsetsize = arg1;
11645             uint32_t *target_grouplist;
11646             gid_t *grouplist;
11647             int i;
11648 
11649             grouplist = alloca(gidsetsize * sizeof(gid_t));
11650             target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
11651             if (!target_grouplist) {
11652                 return -TARGET_EFAULT;
11653             }
            for (i = 0; i < gidsetsize; i++) {
                grouplist[i] = tswap32(target_grouplist[i]);
            }
11656             unlock_user(target_grouplist, arg2, 0);
11657             return get_errno(setgroups(gidsetsize, grouplist));
11658         }
11659 #endif
11660 #ifdef TARGET_NR_fchown32
11661     case TARGET_NR_fchown32:
11662         return get_errno(fchown(arg1, arg2, arg3));
11663 #endif
11664 #ifdef TARGET_NR_setresuid32
11665     case TARGET_NR_setresuid32:
11666         return get_errno(sys_setresuid(arg1, arg2, arg3));
11667 #endif
11668 #ifdef TARGET_NR_getresuid32
11669     case TARGET_NR_getresuid32:
11670         {
11671             uid_t ruid, euid, suid;
11672             ret = get_errno(getresuid(&ruid, &euid, &suid));
11673             if (!is_error(ret)) {
11674                 if (put_user_u32(ruid, arg1)
11675                     || put_user_u32(euid, arg2)
11676                     || put_user_u32(suid, arg3))
11677                     return -TARGET_EFAULT;
11678             }
11679         }
11680         return ret;
11681 #endif
11682 #ifdef TARGET_NR_setresgid32
11683     case TARGET_NR_setresgid32:
11684         return get_errno(sys_setresgid(arg1, arg2, arg3));
11685 #endif
11686 #ifdef TARGET_NR_getresgid32
11687     case TARGET_NR_getresgid32:
11688         {
11689             gid_t rgid, egid, sgid;
11690             ret = get_errno(getresgid(&rgid, &egid, &sgid));
11691             if (!is_error(ret)) {
11692                 if (put_user_u32(rgid, arg1)
11693                     || put_user_u32(egid, arg2)
11694                     || put_user_u32(sgid, arg3))
11695                     return -TARGET_EFAULT;
11696             }
11697         }
11698         return ret;
11699 #endif
11700 #ifdef TARGET_NR_chown32
11701     case TARGET_NR_chown32:
11702         if (!(p = lock_user_string(arg1)))
11703             return -TARGET_EFAULT;
11704         ret = get_errno(chown(p, arg2, arg3));
11705         unlock_user(p, arg1, 0);
11706         return ret;
11707 #endif
11708 #ifdef TARGET_NR_setuid32
11709     case TARGET_NR_setuid32:
11710         return get_errno(sys_setuid(arg1));
11711 #endif
11712 #ifdef TARGET_NR_setgid32
11713     case TARGET_NR_setgid32:
11714         return get_errno(sys_setgid(arg1));
11715 #endif
11716 #ifdef TARGET_NR_setfsuid32
11717     case TARGET_NR_setfsuid32:
11718         return get_errno(setfsuid(arg1));
11719 #endif
11720 #ifdef TARGET_NR_setfsgid32
11721     case TARGET_NR_setfsgid32:
11722         return get_errno(setfsgid(arg1));
11723 #endif
11724 #ifdef TARGET_NR_mincore
11725     case TARGET_NR_mincore:
11726         {
11727             void *a = lock_user(VERIFY_READ, arg1, arg2, 0);
11728             if (!a) {
11729                 return -TARGET_ENOMEM;
11730             }
11731             p = lock_user_string(arg3);
11732             if (!p) {
11733                 ret = -TARGET_EFAULT;
11734             } else {
11735                 ret = get_errno(mincore(a, arg2, p));
11736                 unlock_user(p, arg3, ret);
11737             }
11738             unlock_user(a, arg1, 0);
11739         }
11740         return ret;
11741 #endif
11742 #ifdef TARGET_NR_arm_fadvise64_64
11743     case TARGET_NR_arm_fadvise64_64:
11744         /* arm_fadvise64_64 looks like fadvise64_64 but
11745          * with different argument order: fd, advice, offset, len
11746          * rather than the usual fd, offset, len, advice.
11747          * Note that offset and len are both 64-bit so appear as
11748          * pairs of 32-bit registers.
11749          */
11750         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
11751                             target_offset64(arg5, arg6), arg2);
11752         return -host_to_target_errno(ret);
11753 #endif
11754 
11755 #if TARGET_ABI_BITS == 32
11756 
11757 #ifdef TARGET_NR_fadvise64_64
11758     case TARGET_NR_fadvise64_64:
11759 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
11760         /* 6 args: fd, advice, offset (high, low), len (high, low) */
11761         ret = arg2;
11762         arg2 = arg3;
11763         arg3 = arg4;
11764         arg4 = arg5;
11765         arg5 = arg6;
11766         arg6 = ret;
11767 #else
11768         /* 6 args: fd, offset (high, low), len (high, low), advice */
11769         if (regpairs_aligned(cpu_env, num)) {
11770             /* offset is in (3,4), len in (5,6) and advice in 7 */
11771             arg2 = arg3;
11772             arg3 = arg4;
11773             arg4 = arg5;
11774             arg5 = arg6;
11775             arg6 = arg7;
11776         }
11777 #endif
11778         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
11779                             target_offset64(arg4, arg5), arg6);
11780         return -host_to_target_errno(ret);
11781 #endif
11782 
11783 #ifdef TARGET_NR_fadvise64
11784     case TARGET_NR_fadvise64:
11785         /* 5 args: fd, offset (high, low), len, advice */
11786         if (regpairs_aligned(cpu_env, num)) {
11787             /* offset is in (3,4), len in 5 and advice in 6 */
11788             arg2 = arg3;
11789             arg3 = arg4;
11790             arg4 = arg5;
11791             arg5 = arg6;
11792         }
11793         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
11794         return -host_to_target_errno(ret);
11795 #endif
11796 
11797 #else /* not a 32-bit ABI */
11798 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
11799 #ifdef TARGET_NR_fadvise64_64
11800     case TARGET_NR_fadvise64_64:
11801 #endif
11802 #ifdef TARGET_NR_fadvise64
11803     case TARGET_NR_fadvise64:
11804 #endif
11805 #ifdef TARGET_S390X
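        /*
         * s390x defines POSIX_FADV_DONTNEED/NOREUSE as 6/7 rather than the
         * generic 4/5, so remap them and treat the generic values as invalid.
         */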
11806         switch (arg4) {
11807         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
11808         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
11809         case 6: arg4 = POSIX_FADV_DONTNEED; break;
11810         case 7: arg4 = POSIX_FADV_NOREUSE; break;
11811         default: break;
11812         }
11813 #endif
11814         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
11815 #endif
11816 #endif /* end of 64-bit ABI fadvise handling */
11817 
11818 #ifdef TARGET_NR_madvise
11819     case TARGET_NR_madvise:
11820         /* A straight passthrough may not be safe because qemu sometimes
11821            turns private file-backed mappings into anonymous mappings.
11822            This will break MADV_DONTNEED.
11823            This is a hint, so ignoring and returning success is ok.  */
11824         return 0;
11825 #endif
11826 #ifdef TARGET_NR_fcntl64
11827     case TARGET_NR_fcntl64:
11828     {
11829         int cmd;
11830         struct flock64 fl;
11831         from_flock64_fn *copyfrom = copy_from_user_flock64;
11832         to_flock64_fn *copyto = copy_to_user_flock64;
11833 
11834 #ifdef TARGET_ARM
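        /* The ARM OABI lays out struct flock64 differently from EABI. */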
11835         if (!((CPUARMState *)cpu_env)->eabi) {
11836             copyfrom = copy_from_user_oabi_flock64;
11837             copyto = copy_to_user_oabi_flock64;
11838         }
11839 #endif
11840 
11841         cmd = target_to_host_fcntl_cmd(arg2);
11842         if (cmd == -TARGET_EINVAL) {
11843             return cmd;
11844         }
11845 
        switch (arg2) {
11847         case TARGET_F_GETLK64:
11848             ret = copyfrom(&fl, arg3);
11849             if (ret) {
11850                 break;
11851             }
11852             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
11853             if (ret == 0) {
11854                 ret = copyto(arg3, &fl);
11855             }
            break;
11857 
11858         case TARGET_F_SETLK64:
11859         case TARGET_F_SETLKW64:
11860             ret = copyfrom(&fl, arg3);
11861             if (ret) {
11862                 break;
11863             }
11864             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
11866         default:
11867             ret = do_fcntl(arg1, arg2, arg3);
11868             break;
11869         }
11870         return ret;
11871     }
11872 #endif
11873 #ifdef TARGET_NR_cacheflush
11874     case TARGET_NR_cacheflush:
11875         /* self-modifying code is handled automatically, so nothing needed */
11876         return 0;
11877 #endif
11878 #ifdef TARGET_NR_getpagesize
11879     case TARGET_NR_getpagesize:
11880         return TARGET_PAGE_SIZE;
11881 #endif
11882     case TARGET_NR_gettid:
11883         return get_errno(sys_gettid());
11884 #ifdef TARGET_NR_readahead
11885     case TARGET_NR_readahead:
11886 #if TARGET_ABI_BITS == 32
11887         if (regpairs_aligned(cpu_env, num)) {
11888             arg2 = arg3;
11889             arg3 = arg4;
11890             arg4 = arg5;
11891         }
11892         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
11893 #else
11894         ret = get_errno(readahead(arg1, arg2, arg3));
11895 #endif
11896         return ret;
11897 #endif
11898 #ifdef CONFIG_ATTR
11899 #ifdef TARGET_NR_setxattr
11900     case TARGET_NR_listxattr:
11901     case TARGET_NR_llistxattr:
11902     {
11903         void *p, *b = 0;
11904         if (arg2) {
11905             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11906             if (!b) {
11907                 return -TARGET_EFAULT;
11908             }
11909         }
11910         p = lock_user_string(arg1);
11911         if (p) {
11912             if (num == TARGET_NR_listxattr) {
11913                 ret = get_errno(listxattr(p, b, arg3));
11914             } else {
11915                 ret = get_errno(llistxattr(p, b, arg3));
11916             }
11917         } else {
11918             ret = -TARGET_EFAULT;
11919         }
11920         unlock_user(p, arg1, 0);
11921         unlock_user(b, arg2, arg3);
11922         return ret;
11923     }
11924     case TARGET_NR_flistxattr:
11925     {
11926         void *b = 0;
11927         if (arg2) {
11928             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11929             if (!b) {
11930                 return -TARGET_EFAULT;
11931             }
11932         }
11933         ret = get_errno(flistxattr(arg1, b, arg3));
11934         unlock_user(b, arg2, arg3);
11935         return ret;
11936     }
11937     case TARGET_NR_setxattr:
11938     case TARGET_NR_lsetxattr:
11939         {
11940             void *p, *n, *v = 0;
11941             if (arg3) {
11942                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11943                 if (!v) {
11944                     return -TARGET_EFAULT;
11945                 }
11946             }
11947             p = lock_user_string(arg1);
11948             n = lock_user_string(arg2);
11949             if (p && n) {
11950                 if (num == TARGET_NR_setxattr) {
11951                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
11952                 } else {
11953                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
11954                 }
11955             } else {
11956                 ret = -TARGET_EFAULT;
11957             }
11958             unlock_user(p, arg1, 0);
11959             unlock_user(n, arg2, 0);
11960             unlock_user(v, arg3, 0);
11961         }
11962         return ret;
11963     case TARGET_NR_fsetxattr:
11964         {
11965             void *n, *v = 0;
11966             if (arg3) {
11967                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
11968                 if (!v) {
11969                     return -TARGET_EFAULT;
11970                 }
11971             }
11972             n = lock_user_string(arg2);
11973             if (n) {
11974                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
11975             } else {
11976                 ret = -TARGET_EFAULT;
11977             }
11978             unlock_user(n, arg2, 0);
11979             unlock_user(v, arg3, 0);
11980         }
11981         return ret;
11982     case TARGET_NR_getxattr:
11983     case TARGET_NR_lgetxattr:
11984         {
11985             void *p, *n, *v = 0;
11986             if (arg3) {
11987                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
11988                 if (!v) {
11989                     return -TARGET_EFAULT;
11990                 }
11991             }
11992             p = lock_user_string(arg1);
11993             n = lock_user_string(arg2);
11994             if (p && n) {
11995                 if (num == TARGET_NR_getxattr) {
11996                     ret = get_errno(getxattr(p, n, v, arg4));
11997                 } else {
11998                     ret = get_errno(lgetxattr(p, n, v, arg4));
11999                 }
12000             } else {
12001                 ret = -TARGET_EFAULT;
12002             }
12003             unlock_user(p, arg1, 0);
12004             unlock_user(n, arg2, 0);
12005             unlock_user(v, arg3, arg4);
12006         }
12007         return ret;
12008     case TARGET_NR_fgetxattr:
12009         {
12010             void *n, *v = 0;
12011             if (arg3) {
12012                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12013                 if (!v) {
12014                     return -TARGET_EFAULT;
12015                 }
12016             }
12017             n = lock_user_string(arg2);
12018             if (n) {
12019                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12020             } else {
12021                 ret = -TARGET_EFAULT;
12022             }
12023             unlock_user(n, arg2, 0);
12024             unlock_user(v, arg3, arg4);
12025         }
12026         return ret;
12027     case TARGET_NR_removexattr:
12028     case TARGET_NR_lremovexattr:
12029         {
12030             void *p, *n;
12031             p = lock_user_string(arg1);
12032             n = lock_user_string(arg2);
12033             if (p && n) {
12034                 if (num == TARGET_NR_removexattr) {
12035                     ret = get_errno(removexattr(p, n));
12036                 } else {
12037                     ret = get_errno(lremovexattr(p, n));
12038                 }
12039             } else {
12040                 ret = -TARGET_EFAULT;
12041             }
12042             unlock_user(p, arg1, 0);
12043             unlock_user(n, arg2, 0);
12044         }
12045         return ret;
12046     case TARGET_NR_fremovexattr:
12047         {
12048             void *n;
12049             n = lock_user_string(arg2);
12050             if (n) {
12051                 ret = get_errno(fremovexattr(arg1, n));
12052             } else {
12053                 ret = -TARGET_EFAULT;
12054             }
12055             unlock_user(n, arg2, 0);
12056         }
12057         return ret;
12058 #endif
12059 #endif /* CONFIG_ATTR */
12060 #ifdef TARGET_NR_set_thread_area
12061     case TARGET_NR_set_thread_area:
12062 #if defined(TARGET_MIPS)
12063       ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
12064       return 0;
12065 #elif defined(TARGET_CRIS)
12066       if (arg1 & 0xff)
12067           ret = -TARGET_EINVAL;
12068       else {
12069           ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
12070           ret = 0;
12071       }
12072       return ret;
12073 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12074       return do_set_thread_area(cpu_env, arg1);
12075 #elif defined(TARGET_M68K)
12076       {
12077           TaskState *ts = cpu->opaque;
12078           ts->tp_value = arg1;
12079           return 0;
12080       }
12081 #else
12082       return -TARGET_ENOSYS;
12083 #endif
12084 #endif
12085 #ifdef TARGET_NR_get_thread_area
12086     case TARGET_NR_get_thread_area:
12087 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12088         return do_get_thread_area(cpu_env, arg1);
12089 #elif defined(TARGET_M68K)
12090         {
12091             TaskState *ts = cpu->opaque;
12092             return ts->tp_value;
12093         }
12094 #else
12095         return -TARGET_ENOSYS;
12096 #endif
12097 #endif
12098 #ifdef TARGET_NR_getdomainname
12099     case TARGET_NR_getdomainname:
12100         return -TARGET_ENOSYS;
12101 #endif
12102 
12103 #ifdef TARGET_NR_clock_settime
12104     case TARGET_NR_clock_settime:
12105     {
12106         struct timespec ts;
12107 
12108         ret = target_to_host_timespec(&ts, arg2);
12109         if (!is_error(ret)) {
12110             ret = get_errno(clock_settime(arg1, &ts));
12111         }
12112         return ret;
12113     }
12114 #endif
12115 #ifdef TARGET_NR_clock_settime64
12116     case TARGET_NR_clock_settime64:
12117     {
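              /*
               * Same as TARGET_NR_clock_settime, except the guest-side structure
               * uses the 64-bit time layout, so only the conversion helper differs.
               */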
12118         struct timespec ts;
12119 
12120         ret = target_to_host_timespec64(&ts, arg2);
12121         if (!is_error(ret)) {
12122             ret = get_errno(clock_settime(arg1, &ts));
12123         }
12124         return ret;
12125     }
12126 #endif
12127 #ifdef TARGET_NR_clock_gettime
12128     case TARGET_NR_clock_gettime:
12129     {
12130         struct timespec ts;
12131         ret = get_errno(clock_gettime(arg1, &ts));
12132         if (!is_error(ret)) {
12133             ret = host_to_target_timespec(arg2, &ts);
12134         }
12135         return ret;
12136     }
12137 #endif
12138 #ifdef TARGET_NR_clock_gettime64
12139     case TARGET_NR_clock_gettime64:
12140     {
12141         struct timespec ts;
12142         ret = get_errno(clock_gettime(arg1, &ts));
12143         if (!is_error(ret)) {
12144             ret = host_to_target_timespec64(arg2, &ts);
12145         }
12146         return ret;
12147     }
12148 #endif
12149 #ifdef TARGET_NR_clock_getres
12150     case TARGET_NR_clock_getres:
12151     {
12152         struct timespec ts;
12153         ret = get_errno(clock_getres(arg1, &ts));
12154         if (!is_error(ret)) {
12155             host_to_target_timespec(arg2, &ts);
12156         }
12157         return ret;
12158     }
12159 #endif
12160 #ifdef TARGET_NR_clock_getres_time64
12161     case TARGET_NR_clock_getres_time64:
12162     {
12163         struct timespec ts;
12164         ret = get_errno(clock_getres(arg1, &ts));
12165         if (!is_error(ret)) {
12166             host_to_target_timespec64(arg2, &ts);
12167         }
12168         return ret;
12169     }
12170 #endif
12171 #ifdef TARGET_NR_clock_nanosleep
12172     case TARGET_NR_clock_nanosleep:
12173     {
12174         struct timespec ts;
12175         if (target_to_host_timespec(&ts, arg3)) {
12176             return -TARGET_EFAULT;
12177         }
12178         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12179                                              &ts, arg4 ? &ts : NULL));
12180         /*
12181          * If the call is interrupted by a signal handler, it fails with
12182          * -TARGET_EINTR; in that case, if arg4 is non-NULL and arg2 is not
12183          * TIMER_ABSTIME, the remaining unslept time is written back to arg4.
12184          */
12185         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12186             host_to_target_timespec(arg4, &ts)) {
12187               return -TARGET_EFAULT;
12188         }
12189 
12190         return ret;
12191     }
12192 #endif
12193 #ifdef TARGET_NR_clock_nanosleep_time64
12194     case TARGET_NR_clock_nanosleep_time64:
12195     {
12196         struct timespec ts;
12197 
12198         if (target_to_host_timespec64(&ts, arg3)) {
12199             return -TARGET_EFAULT;
12200         }
12201 
12202         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12203                                              &ts, arg4 ? &ts : NULL));
12204 
12205         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12206             host_to_target_timespec64(arg4, &ts)) {
12207             return -TARGET_EFAULT;
12208         }
12209         return ret;
12210     }
12211 #endif
12212 
12213 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
12214     case TARGET_NR_set_tid_address:
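              /*
               * g2h() turns the guest address into a host pointer so the host
               * kernel can clear (and futex-wake) the TID word there when the
               * thread exits.
               */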
12215         return get_errno(set_tid_address((int *)g2h(cpu, arg1)));
12216 #endif
12217 
12218     case TARGET_NR_tkill:
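              /*
               * Guest and host signal numbers can differ, so the signal is
               * translated before calling the host tkill()/tgkill().
               */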
12219         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12220 
12221     case TARGET_NR_tgkill:
12222         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12223                          target_to_host_signal(arg3)));
12224 
12225 #ifdef TARGET_NR_set_robust_list
12226     case TARGET_NR_set_robust_list:
12227     case TARGET_NR_get_robust_list:
12228         /* The ABI for supporting robust futexes has userspace pass
12229          * the kernel a pointer to a linked list which is updated by
12230          * userspace after the syscall; the list is walked by the kernel
12231          * when the thread exits. Since the linked list in QEMU guest
12232          * memory isn't a valid linked list for the host and we have
12233          * no way to reliably intercept the thread-death event, we can't
12234          * support these. Silently return ENOSYS so that guest userspace
12235          * falls back to a non-robust futex implementation (which should
12236          * be OK except in the corner case of the guest crashing while
12237          * holding a mutex that is shared with another process via
12238          * shared memory).
12239          */
12240         return -TARGET_ENOSYS;
12241 #endif
12242 
12243 #if defined(TARGET_NR_utimensat)
12244     case TARGET_NR_utimensat:
12245         {
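                  /*
                   * arg3, when non-zero, points at an array of two target timespecs
                   * (atime, mtime).  A zero arg2 passes a NULL pathname through,
                   * which the kernel treats as operating on the dirfd itself (the
                   * futimens() case).
                   */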
12246             struct timespec *tsp, ts[2];
12247             if (!arg3) {
12248                 tsp = NULL;
12249             } else {
12250                 if (target_to_host_timespec(ts, arg3)) {
12251                     return -TARGET_EFAULT;
12252                 }
12253                 if (target_to_host_timespec(ts + 1, arg3 +
12254                                             sizeof(struct target_timespec))) {
12255                     return -TARGET_EFAULT;
12256                 }
12257                 tsp = ts;
12258             }
12259             if (!arg2)
12260                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12261             else {
12262                 if (!(p = lock_user_string(arg2))) {
12263                     return -TARGET_EFAULT;
12264                 }
12265                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12266                 unlock_user(p, arg2, 0);
12267             }
12268         }
12269         return ret;
12270 #endif
12271 #ifdef TARGET_NR_utimensat_time64
12272     case TARGET_NR_utimensat_time64:
12273         {
12274             struct timespec *tsp, ts[2];
12275             if (!arg3) {
12276                 tsp = NULL;
12277             } else {
12278                 if (target_to_host_timespec64(ts, arg3)) {
12279                     return -TARGET_EFAULT;
12280                 }
12281                 if (target_to_host_timespec64(ts + 1, arg3 +
12282                                      sizeof(struct target__kernel_timespec))) {
12283                     return -TARGET_EFAULT;
12284                 }
12285                 tsp = ts;
12286             }
12287             if (!arg2)
12288                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12289             else {
12290                 p = lock_user_string(arg2);
12291                 if (!p) {
12292                     return -TARGET_EFAULT;
12293                 }
12294                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12295                 unlock_user(p, arg2, 0);
12296             }
12297         }
12298         return ret;
12299 #endif
12300 #ifdef TARGET_NR_futex
12301     case TARGET_NR_futex:
12302         return do_futex(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12303 #endif
12304 #ifdef TARGET_NR_futex_time64
12305     case TARGET_NR_futex_time64:
12306         return do_futex_time64(cpu, arg1, arg2, arg3, arg4, arg5, arg6);
12307 #endif
12308 #ifdef CONFIG_INOTIFY
12309 #if defined(TARGET_NR_inotify_init)
12310     case TARGET_NR_inotify_init:
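              /*
               * Register an fd translator so that inotify_event records read back
               * from this descriptor can be converted for the guest.
               */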
12311         ret = get_errno(inotify_init());
12312         if (ret >= 0) {
12313             fd_trans_register(ret, &target_inotify_trans);
12314         }
12315         return ret;
12316 #endif
12317 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12318     case TARGET_NR_inotify_init1:
12319         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12320                                           fcntl_flags_tbl)));
12321         if (ret >= 0) {
12322             fd_trans_register(ret, &target_inotify_trans);
12323         }
12324         return ret;
12325 #endif
12326 #if defined(TARGET_NR_inotify_add_watch)
12327     case TARGET_NR_inotify_add_watch:
12328         p = lock_user_string(arg2);
12329         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12330         unlock_user(p, arg2, 0);
12331         return ret;
12332 #endif
12333 #if defined(TARGET_NR_inotify_rm_watch)
12334     case TARGET_NR_inotify_rm_watch:
12335         return get_errno(inotify_rm_watch(arg1, arg2));
12336 #endif
12337 #endif
12338 
12339 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
12340     case TARGET_NR_mq_open:
12341         {
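                  /*
                   * The open flags are translated through the shared fcntl flag
                   * table, and the optional mq_attr (arg4) is copied in from guest
                   * memory before the host mq_open() is issued.
                   */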
12342             struct mq_attr posix_mq_attr;
12343             struct mq_attr *pposix_mq_attr;
12344             int host_flags;
12345 
12346             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
12347             pposix_mq_attr = NULL;
12348             if (arg4) {
12349                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
12350                     return -TARGET_EFAULT;
12351                 }
12352                 pposix_mq_attr = &posix_mq_attr;
12353             }
12354             p = lock_user_string(arg1 - 1);
12355             if (!p) {
12356                 return -TARGET_EFAULT;
12357             }
12358             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
12359             unlock_user(p, arg1, 0);
12360         }
12361         return ret;
12362 
12363     case TARGET_NR_mq_unlink:
12364         p = lock_user_string(arg1 - 1);
12365         if (!p) {
12366             return -TARGET_EFAULT;
12367         }
12368         ret = get_errno(mq_unlink(p));
12369         unlock_user(p, arg1, 0);
12370         return ret;
12371 
12372 #ifdef TARGET_NR_mq_timedsend
12373     case TARGET_NR_mq_timedsend:
12374         {
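                  /*
                   * arg5 is an optional absolute timeout in the guest layout; when
                   * it is zero, a NULL timeout is passed and the call blocks just
                   * like a plain mq_send().
                   */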
12375             struct timespec ts;
12376 
12377             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12378             if (arg5 != 0) {
12379                 if (target_to_host_timespec(&ts, arg5)) {
12380                     return -TARGET_EFAULT;
12381                 }
12382                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12383                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12384                     return -TARGET_EFAULT;
12385                 }
12386             } else {
12387                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12388             }
12389             unlock_user(p, arg2, arg3);
12390         }
12391         return ret;
12392 #endif
12393 #ifdef TARGET_NR_mq_timedsend_time64
12394     case TARGET_NR_mq_timedsend_time64:
12395         {
12396             struct timespec ts;
12397 
12398             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12399             if (arg5 != 0) {
12400                 if (target_to_host_timespec64(&ts, arg5)) {
12401                     return -TARGET_EFAULT;
12402                 }
12403                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
12404                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12405                     return -TARGET_EFAULT;
12406                 }
12407             } else {
12408                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
12409             }
12410             unlock_user(p, arg2, arg3);
12411         }
12412         return ret;
12413 #endif
12414 
12415 #ifdef TARGET_NR_mq_timedreceive
12416     case TARGET_NR_mq_timedreceive:
12417         {
12418             struct timespec ts;
12419             unsigned int prio;
12420 
12421             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12422             if (arg5 != 0) {
12423                 if (target_to_host_timespec(&ts, arg5)) {
12424                     return -TARGET_EFAULT;
12425                 }
12426                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12427                                                      &prio, &ts));
12428                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
12429                     return -TARGET_EFAULT;
12430                 }
12431             } else {
12432                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12433                                                      &prio, NULL));
12434             }
12435             unlock_user(p, arg2, arg3);
12436             if (arg4 != 0)
12437                 put_user_u32(prio, arg4);
12438         }
12439         return ret;
12440 #endif
12441 #ifdef TARGET_NR_mq_timedreceive_time64
12442     case TARGET_NR_mq_timedreceive_time64:
12443         {
12444             struct timespec ts;
12445             unsigned int prio;
12446 
12447             p = lock_user(VERIFY_READ, arg2, arg3, 1);
12448             if (arg5 != 0) {
12449                 if (target_to_host_timespec64(&ts, arg5)) {
12450                     return -TARGET_EFAULT;
12451                 }
12452                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12453                                                      &prio, &ts));
12454                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
12455                     return -TARGET_EFAULT;
12456                 }
12457             } else {
12458                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
12459                                                      &prio, NULL));
12460             }
12461             unlock_user(p, arg2, arg3);
12462             if (arg4 != 0) {
12463                 put_user_u32(prio, arg4);
12464             }
12465         }
12466         return ret;
12467 #endif
12468 
12469     /* Not implemented for now... */
12470 /*     case TARGET_NR_mq_notify: */
12471 /*         break; */
12472 
12473     case TARGET_NR_mq_getsetattr:
12474         {
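                  /*
                   * arg2 (if set) supplies new attributes for mq_setattr(); arg3
                   * (if set) receives the resulting attributes, converted back to
                   * the guest layout with the mq_attr copy helpers.
                   */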
12475             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
12476             ret = 0;
12477             if (arg2 != 0) {
12478                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
12479                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
12480                                            &posix_mq_attr_out));
12481             } else if (arg3 != 0) {
12482                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
12483             }
12484             if (ret == 0 && arg3 != 0) {
12485                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
12486             }
12487         }
12488         return ret;
12489 #endif
12490 
12491 #ifdef CONFIG_SPLICE
12492 #ifdef TARGET_NR_tee
12493     case TARGET_NR_tee:
12494         {
12495             ret = get_errno(tee(arg1, arg2, arg3, arg4));
12496         }
12497         return ret;
12498 #endif
12499 #ifdef TARGET_NR_splice
12500     case TARGET_NR_splice:
12501         {
12502             loff_t loff_in, loff_out;
12503             loff_t *ploff_in = NULL, *ploff_out = NULL;
12504             if (arg2) {
12505                 if (get_user_u64(loff_in, arg2)) {
12506                     return -TARGET_EFAULT;
12507                 }
12508                 ploff_in = &loff_in;
12509             }
12510             if (arg4) {
12511                 if (get_user_u64(loff_out, arg4)) {
12512                     return -TARGET_EFAULT;
12513                 }
12514                 ploff_out = &loff_out;
12515             }
12516             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
12517             if (arg2) {
12518                 if (put_user_u64(loff_in, arg2)) {
12519                     return -TARGET_EFAULT;
12520                 }
12521             }
12522             if (arg4) {
12523                 if (put_user_u64(loff_out, arg4)) {
12524                     return -TARGET_EFAULT;
12525                 }
12526             }
12527         }
12528         return ret;
12529 #endif
12530 #ifdef TARGET_NR_vmsplice
12531     case TARGET_NR_vmsplice:
12532         {
12533             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
12534             if (vec != NULL) {
12535                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
12536                 unlock_iovec(vec, arg2, arg3, 0);
12537             } else {
12538                 ret = -host_to_target_errno(errno);
12539             }
12540         }
12541         return ret;
12542 #endif
12543 #endif /* CONFIG_SPLICE */
12544 #ifdef CONFIG_EVENTFD
12545 #if defined(TARGET_NR_eventfd)
12546     case TARGET_NR_eventfd:
12547         ret = get_errno(eventfd(arg1, 0));
12548         if (ret >= 0) {
12549             fd_trans_register(ret, &target_eventfd_trans);
12550         }
12551         return ret;
12552 #endif
12553 #if defined(TARGET_NR_eventfd2)
12554     case TARGET_NR_eventfd2:
12555     {
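              /*
               * Only the NONBLOCK and CLOEXEC bits need remapping to the host
               * O_NONBLOCK/O_CLOEXEC values; any other flag bits are passed
               * through unchanged.
               */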
12556         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
12557         if (arg2 & TARGET_O_NONBLOCK) {
12558             host_flags |= O_NONBLOCK;
12559         }
12560         if (arg2 & TARGET_O_CLOEXEC) {
12561             host_flags |= O_CLOEXEC;
12562         }
12563         ret = get_errno(eventfd(arg1, host_flags));
12564         if (ret >= 0) {
12565             fd_trans_register(ret, &target_eventfd_trans);
12566         }
12567         return ret;
12568     }
12569 #endif
12570 #endif /* CONFIG_EVENTFD  */
12571 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
12572     case TARGET_NR_fallocate:
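              /*
               * On 32-bit ABIs the 64-bit offset and length each arrive split
               * across a pair of registers and are reassembled with
               * target_offset64().
               */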
12573 #if TARGET_ABI_BITS == 32
12574         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
12575                                   target_offset64(arg5, arg6)));
12576 #else
12577         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
12578 #endif
12579         return ret;
12580 #endif
12581 #if defined(CONFIG_SYNC_FILE_RANGE)
12582 #if defined(TARGET_NR_sync_file_range)
12583     case TARGET_NR_sync_file_range:
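              /*
               * On 32-bit ABIs the two 64-bit offsets are split across register
               * pairs.  MIPS appears to insert an alignment slot after the fd,
               * so its offsets start at arg3 and the flags arrive in arg7.
               */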
12584 #if TARGET_ABI_BITS == 32
12585 #if defined(TARGET_MIPS)
12586         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12587                                         target_offset64(arg5, arg6), arg7));
12588 #else
12589         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
12590                                         target_offset64(arg4, arg5), arg6));
12591 #endif /* !TARGET_MIPS */
12592 #else
12593         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
12594 #endif
12595         return ret;
12596 #endif
12597 #if defined(TARGET_NR_sync_file_range2) || \
12598     defined(TARGET_NR_arm_sync_file_range)
12599 #if defined(TARGET_NR_sync_file_range2)
12600     case TARGET_NR_sync_file_range2:
12601 #endif
12602 #if defined(TARGET_NR_arm_sync_file_range)
12603     case TARGET_NR_arm_sync_file_range:
12604 #endif
12605         /* This is like sync_file_range but the arguments are reordered */
12606 #if TARGET_ABI_BITS == 32
12607         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
12608                                         target_offset64(arg5, arg6), arg2));
12609 #else
12610         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
12611 #endif
12612         return ret;
12613 #endif
12614 #endif
12615 #if defined(TARGET_NR_signalfd4)
12616     case TARGET_NR_signalfd4:
12617         return do_signalfd4(arg1, arg2, arg4);
12618 #endif
12619 #if defined(TARGET_NR_signalfd)
12620     case TARGET_NR_signalfd:
12621         return do_signalfd4(arg1, arg2, 0);
12622 #endif
12623 #if defined(CONFIG_EPOLL)
12624 #if defined(TARGET_NR_epoll_create)
12625     case TARGET_NR_epoll_create:
12626         return get_errno(epoll_create(arg1));
12627 #endif
12628 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
12629     case TARGET_NR_epoll_create1:
12630         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
12631 #endif
12632 #if defined(TARGET_NR_epoll_ctl)
12633     case TARGET_NR_epoll_ctl:
12634     {
12635         struct epoll_event ep;
12636         struct epoll_event *epp = 0;
12637         if (arg4) {
12638             if (arg2 != EPOLL_CTL_DEL) {
12639                 struct target_epoll_event *target_ep;
12640                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
12641                     return -TARGET_EFAULT;
12642                 }
12643                 ep.events = tswap32(target_ep->events);
12644                 /*
12645                  * The epoll_data_t union is just opaque data to the kernel,
12646                  * so we transfer all 64 bits across and need not worry what
12647                  * actual data type it is.
12648                  */
12649                 ep.data.u64 = tswap64(target_ep->data.u64);
12650                 unlock_user_struct(target_ep, arg4, 0);
12651             }
12652             /*
12653              * Before kernel 2.6.9, the EPOLL_CTL_DEL operation required a
12654              * non-NULL event pointer even though that argument is ignored,
12655              * so pass a valid host pointer whenever the guest supplied one.
12656              */
12657             epp = &ep;
12658         }
12659         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
12660     }
12661 #endif
12662 
12663 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
12664 #if defined(TARGET_NR_epoll_wait)
12665     case TARGET_NR_epoll_wait:
12666 #endif
12667 #if defined(TARGET_NR_epoll_pwait)
12668     case TARGET_NR_epoll_pwait:
12669 #endif
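          /*
           * Both variants share one implementation: bound maxevents, collect the
           * host epoll_events into a temporary array, then byte-swap each entry
           * into the guest's target_epoll_event buffer.
           */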
12670     {
12671         struct target_epoll_event *target_ep;
12672         struct epoll_event *ep;
12673         int epfd = arg1;
12674         int maxevents = arg3;
12675         int timeout = arg4;
12676 
12677         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
12678             return -TARGET_EINVAL;
12679         }
12680 
12681         target_ep = lock_user(VERIFY_WRITE, arg2,
12682                               maxevents * sizeof(struct target_epoll_event), 1);
12683         if (!target_ep) {
12684             return -TARGET_EFAULT;
12685         }
12686 
12687         ep = g_try_new(struct epoll_event, maxevents);
12688         if (!ep) {
12689             unlock_user(target_ep, arg2, 0);
12690             return -TARGET_ENOMEM;
12691         }
12692 
12693         switch (num) {
12694 #if defined(TARGET_NR_epoll_pwait)
12695         case TARGET_NR_epoll_pwait:
12696         {
12697             sigset_t *set = NULL;
12698 
12699             if (arg5) {
12700                 ret = process_sigsuspend_mask(&set, arg5, arg6);
12701                 if (ret != 0) {
12702                     break;
12703                 }
12704             }
12705 
12706             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12707                                              set, SIGSET_T_SIZE));
12708 
12709             if (set) {
12710                 finish_sigsuspend_mask(ret);
12711             }
12712             break;
12713         }
12714 #endif
12715 #if defined(TARGET_NR_epoll_wait)
12716         case TARGET_NR_epoll_wait:
12717             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
12718                                              NULL, 0));
12719             break;
12720 #endif
12721         default:
12722             ret = -TARGET_ENOSYS;
12723         }
12724         if (!is_error(ret)) {
12725             int i;
12726             for (i = 0; i < ret; i++) {
12727                 target_ep[i].events = tswap32(ep[i].events);
12728                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
12729             }
12730             unlock_user(target_ep, arg2,
12731                         ret * sizeof(struct target_epoll_event));
12732         } else {
12733             unlock_user(target_ep, arg2, 0);
12734         }
12735         g_free(ep);
12736         return ret;
12737     }
12738 #endif
12739 #endif
12740 #ifdef TARGET_NR_prlimit64
12741     case TARGET_NR_prlimit64:
12742     {
12743         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
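              /*
               * New limits for RLIMIT_AS, RLIMIT_DATA and RLIMIT_STACK are not
               * forwarded to the host, presumably so a guest cannot clamp the
               * address-space limits of the emulating QEMU process itself.
               */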
12744         struct target_rlimit64 *target_rnew, *target_rold;
12745         struct host_rlimit64 rnew, rold, *rnewp = 0;
12746         int resource = target_to_host_resource(arg2);
12747 
12748         if (arg3 && (resource != RLIMIT_AS &&
12749                      resource != RLIMIT_DATA &&
12750                      resource != RLIMIT_STACK)) {
12751             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
12752                 return -TARGET_EFAULT;
12753             }
12754             rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
12755             rnew.rlim_max = tswap64(target_rnew->rlim_max);
12756             unlock_user_struct(target_rnew, arg3, 0);
12757             rnewp = &rnew;
12758         }
12759 
12760         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
12761         if (!is_error(ret) && arg4) {
12762             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
12763                 return -TARGET_EFAULT;
12764             }
12765             target_rold->rlim_cur = tswap64(rold.rlim_cur);
12766             target_rold->rlim_max = tswap64(rold.rlim_max);
12767             unlock_user_struct(target_rold, arg4, 1);
12768         }
12769         return ret;
12770     }
12771 #endif
12772 #ifdef TARGET_NR_gethostname
12773     case TARGET_NR_gethostname:
12774     {
12775         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
12776         if (name) {
12777             ret = get_errno(gethostname(name, arg2));
12778             unlock_user(name, arg1, arg2);
12779         } else {
12780             ret = -TARGET_EFAULT;
12781         }
12782         return ret;
12783     }
12784 #endif
12785 #ifdef TARGET_NR_atomic_cmpxchg_32
12786     case TARGET_NR_atomic_cmpxchg_32:
12787     {
12788         /* should use start_exclusive from main.c */
12789         abi_ulong mem_value;
12790         if (get_user_u32(mem_value, arg6)) {
12791             target_siginfo_t info;
12792             info.si_signo = SIGSEGV;
12793             info.si_errno = 0;
12794             info.si_code = TARGET_SEGV_MAPERR;
12795             info._sifields._sigfault._addr = arg6;
12796             queue_signal((CPUArchState *)cpu_env, info.si_signo,
12797                          QEMU_SI_FAULT, &info);
12798             ret = 0xdeadbeef;
12799 
12800         }
12801         if (mem_value == arg2)
12802             put_user_u32(arg1, arg6);
12803         return mem_value;
12804     }
12805 #endif
12806 #ifdef TARGET_NR_atomic_barrier
12807     case TARGET_NR_atomic_barrier:
12808         /* Like the kernel implementation and the QEMU ARM barrier,
12809            treat this as a no-op. */
12810         return 0;
12811 #endif
12812 
12813 #ifdef TARGET_NR_timer_create
12814     case TARGET_NR_timer_create:
12815     {
12816         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
12817 
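              /*
               * The id handed back to the guest is the index into the
               * g_posix_timers table tagged with TIMER_MAGIC; get_timer_id()
               * validates the tag and recovers the index for the other timer_*
               * syscalls below.
               */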
12818         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
12819 
12820         int clkid = arg1;
12821         int timer_index = next_free_host_timer();
12822 
12823         if (timer_index < 0) {
12824             ret = -TARGET_EAGAIN;
12825         } else {
12826             timer_t *phtimer = g_posix_timers + timer_index;
12827 
12828             if (arg2) {
12829                 phost_sevp = &host_sevp;
12830                 ret = target_to_host_sigevent(phost_sevp, arg2);
12831                 if (ret != 0) {
12832                     return ret;
12833                 }
12834             }
12835 
12836             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
12837             if (ret) {
12838                 phtimer = NULL;
12839             } else {
12840                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
12841                     return -TARGET_EFAULT;
12842                 }
12843             }
12844         }
12845         return ret;
12846     }
12847 #endif
12848 
12849 #ifdef TARGET_NR_timer_settime
12850     case TARGET_NR_timer_settime:
12851     {
12852         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
12853          * struct itimerspec *old_value */
12854         target_timer_t timerid = get_timer_id(arg1);
12855 
12856         if (timerid < 0) {
12857             ret = timerid;
12858         } else if (arg3 == 0) {
12859             ret = -TARGET_EINVAL;
12860         } else {
12861             timer_t htimer = g_posix_timers[timerid];
12862             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12863 
12864             if (target_to_host_itimerspec(&hspec_new, arg3)) {
12865                 return -TARGET_EFAULT;
12866             }
12867             ret = get_errno(
12868                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12869             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
12870                 return -TARGET_EFAULT;
12871             }
12872         }
12873         return ret;
12874     }
12875 #endif
12876 
12877 #ifdef TARGET_NR_timer_settime64
12878     case TARGET_NR_timer_settime64:
12879     {
12880         target_timer_t timerid = get_timer_id(arg1);
12881 
12882         if (timerid < 0) {
12883             ret = timerid;
12884         } else if (arg3 == 0) {
12885             ret = -TARGET_EINVAL;
12886         } else {
12887             timer_t htimer = g_posix_timers[timerid];
12888             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
12889 
12890             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
12891                 return -TARGET_EFAULT;
12892             }
12893             ret = get_errno(
12894                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
12895             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
12896                 return -TARGET_EFAULT;
12897             }
12898         }
12899         return ret;
12900     }
12901 #endif
12902 
12903 #ifdef TARGET_NR_timer_gettime
12904     case TARGET_NR_timer_gettime:
12905     {
12906         /* args: timer_t timerid, struct itimerspec *curr_value */
12907         target_timer_t timerid = get_timer_id(arg1);
12908 
12909         if (timerid < 0) {
12910             ret = timerid;
12911         } else if (!arg2) {
12912             ret = -TARGET_EFAULT;
12913         } else {
12914             timer_t htimer = g_posix_timers[timerid];
12915             struct itimerspec hspec;
12916             ret = get_errno(timer_gettime(htimer, &hspec));
12917 
12918             if (host_to_target_itimerspec(arg2, &hspec)) {
12919                 ret = -TARGET_EFAULT;
12920             }
12921         }
12922         return ret;
12923     }
12924 #endif
12925 
12926 #ifdef TARGET_NR_timer_gettime64
12927     case TARGET_NR_timer_gettime64:
12928     {
12929         /* args: timer_t timerid, struct itimerspec64 *curr_value */
12930         target_timer_t timerid = get_timer_id(arg1);
12931 
12932         if (timerid < 0) {
12933             ret = timerid;
12934         } else if (!arg2) {
12935             ret = -TARGET_EFAULT;
12936         } else {
12937             timer_t htimer = g_posix_timers[timerid];
12938             struct itimerspec hspec;
12939             ret = get_errno(timer_gettime(htimer, &hspec));
12940 
12941             if (host_to_target_itimerspec64(arg2, &hspec)) {
12942                 ret = -TARGET_EFAULT;
12943             }
12944         }
12945         return ret;
12946     }
12947 #endif
12948 
12949 #ifdef TARGET_NR_timer_getoverrun
12950     case TARGET_NR_timer_getoverrun:
12951     {
12952         /* args: timer_t timerid */
12953         target_timer_t timerid = get_timer_id(arg1);
12954 
12955         if (timerid < 0) {
12956             ret = timerid;
12957         } else {
12958             timer_t htimer = g_posix_timers[timerid];
12959             ret = get_errno(timer_getoverrun(htimer));
12960         }
12961         return ret;
12962     }
12963 #endif
12964 
12965 #ifdef TARGET_NR_timer_delete
12966     case TARGET_NR_timer_delete:
12967     {
12968         /* args: timer_t timerid */
12969         target_timer_t timerid = get_timer_id(arg1);
12970 
12971         if (timerid < 0) {
12972             ret = timerid;
12973         } else {
12974             timer_t htimer = g_posix_timers[timerid];
12975             ret = get_errno(timer_delete(htimer));
12976             g_posix_timers[timerid] = 0;
12977         }
12978         return ret;
12979     }
12980 #endif
12981 
12982 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
12983     case TARGET_NR_timerfd_create:
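              /*
               * The CLOEXEC/NONBLOCK creation flags are translated through the
               * shared fcntl flag table.
               */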
12984         return get_errno(timerfd_create(arg1,
12985                           target_to_host_bitmask(arg2, fcntl_flags_tbl)));
12986 #endif
12987 
12988 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
12989     case TARGET_NR_timerfd_gettime:
12990         {
12991             struct itimerspec its_curr;
12992 
12993             ret = get_errno(timerfd_gettime(arg1, &its_curr));
12994 
12995             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
12996                 return -TARGET_EFAULT;
12997             }
12998         }
12999         return ret;
13000 #endif
13001 
13002 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13003     case TARGET_NR_timerfd_gettime64:
13004         {
13005             struct itimerspec its_curr;
13006 
13007             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13008 
13009             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13010                 return -TARGET_EFAULT;
13011             }
13012         }
13013         return ret;
13014 #endif
13015 
13016 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13017     case TARGET_NR_timerfd_settime:
13018         {
13019             struct itimerspec its_new, its_old, *p_new;
13020 
13021             if (arg3) {
13022                 if (target_to_host_itimerspec(&its_new, arg3)) {
13023                     return -TARGET_EFAULT;
13024                 }
13025                 p_new = &its_new;
13026             } else {
13027                 p_new = NULL;
13028             }
13029 
13030             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13031 
13032             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13033                 return -TARGET_EFAULT;
13034             }
13035         }
13036         return ret;
13037 #endif
13038 
13039 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13040     case TARGET_NR_timerfd_settime64:
13041         {
13042             struct itimerspec its_new, its_old, *p_new;
13043 
13044             if (arg3) {
13045                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13046                     return -TARGET_EFAULT;
13047                 }
13048                 p_new = &its_new;
13049             } else {
13050                 p_new = NULL;
13051             }
13052 
13053             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13054 
13055             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13056                 return -TARGET_EFAULT;
13057             }
13058         }
13059         return ret;
13060 #endif
13061 
13062 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13063     case TARGET_NR_ioprio_get:
13064         return get_errno(ioprio_get(arg1, arg2));
13065 #endif
13066 
13067 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13068     case TARGET_NR_ioprio_set:
13069         return get_errno(ioprio_set(arg1, arg2, arg3));
13070 #endif
13071 
13072 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13073     case TARGET_NR_setns:
13074         return get_errno(setns(arg1, arg2));
13075 #endif
13076 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13077     case TARGET_NR_unshare:
13078         return get_errno(unshare(arg1));
13079 #endif
13080 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13081     case TARGET_NR_kcmp:
13082         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13083 #endif
13084 #ifdef TARGET_NR_swapcontext
13085     case TARGET_NR_swapcontext:
13086         /* PowerPC specific.  */
13087         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13088 #endif
13089 #ifdef TARGET_NR_memfd_create
13090     case TARGET_NR_memfd_create:
13091         p = lock_user_string(arg1);
13092         if (!p) {
13093             return -TARGET_EFAULT;
13094         }
13095         ret = get_errno(memfd_create(p, arg2));
13096         fd_trans_unregister(ret);
13097         unlock_user(p, arg1, 0);
13098         return ret;
13099 #endif
13100 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13101     case TARGET_NR_membarrier:
13102         return get_errno(membarrier(arg1, arg2));
13103 #endif
13104 
13105 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13106     case TARGET_NR_copy_file_range:
13107         {
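                  /*
                   * As with splice, the optional 64-bit offsets are copied in from
                   * guest memory and, when data was actually transferred, the
                   * updated offsets are copied back out.
                   */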
13108             loff_t inoff, outoff;
13109             loff_t *pinoff = NULL, *poutoff = NULL;
13110 
13111             if (arg2) {
13112                 if (get_user_u64(inoff, arg2)) {
13113                     return -TARGET_EFAULT;
13114                 }
13115                 pinoff = &inoff;
13116             }
13117             if (arg4) {
13118                 if (get_user_u64(outoff, arg4)) {
13119                     return -TARGET_EFAULT;
13120                 }
13121                 poutoff = &outoff;
13122             }
13123             /* Do not sign-extend the count parameter. */
13124             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13125                                                  (abi_ulong)arg5, arg6));
13126             if (!is_error(ret) && ret > 0) {
13127                 if (arg2) {
13128                     if (put_user_u64(inoff, arg2)) {
13129                         return -TARGET_EFAULT;
13130                     }
13131                 }
13132                 if (arg4) {
13133                     if (put_user_u64(outoff, arg4)) {
13134                         return -TARGET_EFAULT;
13135                     }
13136                 }
13137             }
13138         }
13139         return ret;
13140 #endif
13141 
13142 #if defined(TARGET_NR_pivot_root)
13143     case TARGET_NR_pivot_root:
13144         {
13145             void *p2;
13146             p = lock_user_string(arg1); /* new_root */
13147             p2 = lock_user_string(arg2); /* put_old */
13148             if (!p || !p2) {
13149                 ret = -TARGET_EFAULT;
13150             } else {
13151                 ret = get_errno(pivot_root(p, p2));
13152             }
13153             unlock_user(p2, arg2, 0);
13154             unlock_user(p, arg1, 0);
13155         }
13156         return ret;
13157 #endif
13158 
13159     default:
13160         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13161         return -TARGET_ENOSYS;
13162     }
13163     return ret;
13164 }
13165 
13166 abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
13167                     abi_long arg2, abi_long arg3, abi_long arg4,
13168                     abi_long arg5, abi_long arg6, abi_long arg7,
13169                     abi_long arg8)
13170 {
13171     CPUState *cpu = env_cpu(cpu_env);
13172     abi_long ret;
13173 
13174 #ifdef DEBUG_ERESTARTSYS
13175     /* Debug-only code for exercising the syscall-restart code paths
13176      * in the per-architecture cpu main loops: restart every syscall
13177      * the guest makes once before letting it through.
13178      */
13179     {
13180         static bool flag;
13181         flag = !flag;
13182         if (flag) {
13183             return -QEMU_ERESTARTSYS;
13184         }
13185     }
13186 #endif
13187 
13188     record_syscall_start(cpu, num, arg1,
13189                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13190 
13191     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13192         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13193     }
13194 
13195     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13196                       arg5, arg6, arg7, arg8);
13197 
13198     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13199         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13200                           arg3, arg4, arg5, arg6);
13201     }
13202 
13203     record_syscall_return(cpu, num, ret);
13204     return ret;
13205 }
13206