xref: /openbmc/qemu/linux-user/syscall.c (revision f7214f99)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include <elf.h>
30 #include <endian.h>
31 #include <grp.h>
32 #include <sys/ipc.h>
33 #include <sys/msg.h>
34 #include <sys/wait.h>
35 #include <sys/mount.h>
36 #include <sys/file.h>
37 #include <sys/fsuid.h>
38 #include <sys/personality.h>
39 #include <sys/prctl.h>
40 #include <sys/resource.h>
41 #include <sys/swap.h>
42 #include <linux/capability.h>
43 #include <sched.h>
44 #include <sys/timex.h>
45 #include <sys/socket.h>
46 #include <linux/sockios.h>
47 #include <sys/un.h>
48 #include <sys/uio.h>
49 #include <poll.h>
50 #include <sys/times.h>
51 #include <sys/shm.h>
52 #include <sys/sem.h>
53 #include <sys/statfs.h>
54 #include <utime.h>
55 #include <sys/sysinfo.h>
56 #include <sys/signalfd.h>
57 //#include <sys/user.h>
58 #include <netinet/in.h>
59 #include <netinet/ip.h>
60 #include <netinet/tcp.h>
61 #include <netinet/udp.h>
62 #include <linux/wireless.h>
63 #include <linux/icmp.h>
64 #include <linux/icmpv6.h>
65 #include <linux/if_tun.h>
66 #include <linux/in6.h>
67 #include <linux/errqueue.h>
68 #include <linux/random.h>
69 #ifdef CONFIG_TIMERFD
70 #include <sys/timerfd.h>
71 #endif
72 #ifdef CONFIG_EVENTFD
73 #include <sys/eventfd.h>
74 #endif
75 #ifdef CONFIG_EPOLL
76 #include <sys/epoll.h>
77 #endif
78 #ifdef CONFIG_ATTR
79 #include "qemu/xattr.h"
80 #endif
81 #ifdef CONFIG_SENDFILE
82 #include <sys/sendfile.h>
83 #endif
84 #ifdef HAVE_SYS_KCOV_H
85 #include <sys/kcov.h>
86 #endif
87 
88 #define termios host_termios
89 #define winsize host_winsize
90 #define termio host_termio
91 #define sgttyb host_sgttyb /* same as target */
92 #define tchars host_tchars /* same as target */
93 #define ltchars host_ltchars /* same as target */
94 
95 #include <linux/termios.h>
96 #include <linux/unistd.h>
97 #include <linux/cdrom.h>
98 #include <linux/hdreg.h>
99 #include <linux/soundcard.h>
100 #include <linux/kd.h>
101 #include <linux/mtio.h>
102 #include <linux/fs.h>
103 #include <linux/fd.h>
104 #if defined(CONFIG_FIEMAP)
105 #include <linux/fiemap.h>
106 #endif
107 #include <linux/fb.h>
108 #if defined(CONFIG_USBFS)
109 #include <linux/usbdevice_fs.h>
110 #include <linux/usb/ch9.h>
111 #endif
112 #include <linux/vt.h>
113 #include <linux/dm-ioctl.h>
114 #include <linux/reboot.h>
115 #include <linux/route.h>
116 #include <linux/filter.h>
117 #include <linux/blkpg.h>
118 #include <netpacket/packet.h>
119 #include <linux/netlink.h>
120 #include <linux/if_alg.h>
121 #include <linux/rtc.h>
122 #include <sound/asound.h>
123 #ifdef HAVE_BTRFS_H
124 #include <linux/btrfs.h>
125 #endif
126 #ifdef HAVE_DRM_H
127 #include <libdrm/drm.h>
128 #include <libdrm/i915_drm.h>
129 #endif
130 #include "linux_loop.h"
131 #include "uname.h"
132 
133 #include "qemu.h"
134 #include "user-internals.h"
135 #include "strace.h"
136 #include "signal-common.h"
137 #include "loader.h"
138 #include "user-mmap.h"
139 #include "user/safe-syscall.h"
140 #include "qemu/guest-random.h"
141 #include "qemu/selfmap.h"
142 #include "user/syscall-trace.h"
143 #include "special-errno.h"
144 #include "qapi/error.h"
145 #include "fd-trans.h"
146 #include "cpu_loop-common.h"
147 
148 #ifndef CLONE_IO
149 #define CLONE_IO                0x80000000      /* Clone io context */
150 #endif
151 
152 /* We can't directly call the host clone syscall, because this will
153  * badly confuse libc (breaking mutexes, for example). So we must
154  * divide clone flags into:
155  *  * flag combinations that look like pthread_create()
156  *  * flag combinations that look like fork()
157  *  * flags we can implement within QEMU itself
158  *  * flags we can't support and will return an error for
159  */
160 /* For thread creation, all these flags must be present; for
161  * fork, none must be present.
162  */
163 #define CLONE_THREAD_FLAGS                              \
164     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
165      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
166 
167 /* These flags are ignored:
168  * CLONE_DETACHED is now ignored by the kernel;
169  * CLONE_IO is just an optimisation hint to the I/O scheduler
170  */
171 #define CLONE_IGNORED_FLAGS                     \
172     (CLONE_DETACHED | CLONE_IO)
173 
174 #ifndef CLONE_PIDFD
175 # define CLONE_PIDFD 0x00001000
176 #endif
177 
178 /* Flags for fork which we can implement within QEMU itself */
179 #define CLONE_OPTIONAL_FORK_FLAGS               \
180     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
181      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
182 
183 /* Flags for thread creation which we can implement within QEMU itself */
184 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
185     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
186      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
187 
188 #define CLONE_INVALID_FORK_FLAGS                                        \
189     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
190 
191 #define CLONE_INVALID_THREAD_FLAGS                                      \
192     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
193        CLONE_IGNORED_FLAGS))
194 
195 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
196  * have almost all been allocated. We cannot support any of
197  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
198  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
199  * The checks against the invalid thread masks above will catch these.
200  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
201  */
202 
203 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
204  * once. This exercises the codepaths for restart.
205  */
206 //#define DEBUG_ERESTARTSYS
207 
208 //#include <linux/msdos_fs.h>
209 #define VFAT_IOCTL_READDIR_BOTH \
210     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
211 #define VFAT_IOCTL_READDIR_SHORT \
212     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
213 
214 #undef _syscall0
215 #undef _syscall1
216 #undef _syscall2
217 #undef _syscall3
218 #undef _syscall4
219 #undef _syscall5
220 #undef _syscall6
221 
222 #define _syscall0(type,name)		\
223 static type name (void)			\
224 {					\
225 	return syscall(__NR_##name);	\
226 }
227 
228 #define _syscall1(type,name,type1,arg1)		\
229 static type name (type1 arg1)			\
230 {						\
231 	return syscall(__NR_##name, arg1);	\
232 }
233 
234 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
235 static type name (type1 arg1,type2 arg2)		\
236 {							\
237 	return syscall(__NR_##name, arg1, arg2);	\
238 }
239 
240 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
241 static type name (type1 arg1,type2 arg2,type3 arg3)		\
242 {								\
243 	return syscall(__NR_##name, arg1, arg2, arg3);		\
244 }
245 
246 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
247 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
248 {										\
249 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
250 }
251 
252 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
253 		  type5,arg5)							\
254 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
255 {										\
256 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
257 }
258 
259 
260 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
261 		  type5,arg5,type6,arg6)					\
262 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
263                   type6 arg6)							\
264 {										\
265 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
266 }
267 
268 
269 #define __NR_sys_uname __NR_uname
270 #define __NR_sys_getcwd1 __NR_getcwd
271 #define __NR_sys_getdents __NR_getdents
272 #define __NR_sys_getdents64 __NR_getdents64
273 #define __NR_sys_getpriority __NR_getpriority
274 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
275 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
276 #define __NR_sys_syslog __NR_syslog
277 #if defined(__NR_futex)
278 # define __NR_sys_futex __NR_futex
279 #endif
280 #if defined(__NR_futex_time64)
281 # define __NR_sys_futex_time64 __NR_futex_time64
282 #endif
283 #define __NR_sys_statx __NR_statx
284 
285 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
286 #define __NR__llseek __NR_lseek
287 #endif
288 
289 /* Newer kernel ports have llseek() instead of _llseek() */
290 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
291 #define TARGET_NR__llseek TARGET_NR_llseek
292 #endif
293 
294 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
295 #ifndef TARGET_O_NONBLOCK_MASK
296 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
297 #endif
298 
299 #define __NR_sys_gettid __NR_gettid
300 _syscall0(int, sys_gettid)
301 
302 /* For the 64-bit guest on 32-bit host case we must emulate
303  * getdents using getdents64, because otherwise the host
304  * might hand us back more dirent records than we can fit
305  * into the guest buffer after structure format conversion.
306  * Otherwise we emulate getdents with getdents if the host has it.
307  */
308 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
309 #define EMULATE_GETDENTS_WITH_GETDENTS
310 #endif
311 
312 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
313 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
314 #endif
315 #if (defined(TARGET_NR_getdents) && \
316       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
317     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
318 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
319 #endif
320 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
321 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
322           loff_t *, res, unsigned int, wh);
323 #endif
324 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
325 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
326           siginfo_t *, uinfo)
327 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
328 #ifdef __NR_exit_group
329 _syscall1(int,exit_group,int,error_code)
330 #endif
331 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
332 #define __NR_sys_close_range __NR_close_range
333 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
334 #ifndef CLOSE_RANGE_CLOEXEC
335 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
336 #endif
337 #endif
338 #if defined(__NR_futex)
339 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
340           const struct timespec *,timeout,int *,uaddr2,int,val3)
341 #endif
342 #if defined(__NR_futex_time64)
343 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
347 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
348 #endif
349 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
350 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
351                              unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
354 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
355 #endif
356 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
357 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
358           unsigned long *, user_mask_ptr);
359 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
360 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
361           unsigned long *, user_mask_ptr);
362 /* sched_attr is not defined in glibc */
363 struct sched_attr {
364     uint32_t size;
365     uint32_t sched_policy;
366     uint64_t sched_flags;
367     int32_t sched_nice;
368     uint32_t sched_priority;
369     uint64_t sched_runtime;
370     uint64_t sched_deadline;
371     uint64_t sched_period;
372     uint32_t sched_util_min;
373     uint32_t sched_util_max;
374 };
375 #define __NR_sys_sched_getattr __NR_sched_getattr
376 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
377           unsigned int, size, unsigned int, flags);
378 #define __NR_sys_sched_setattr __NR_sched_setattr
379 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
380           unsigned int, flags);
381 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
382 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
383 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
384 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
385           const struct sched_param *, param);
386 #define __NR_sys_sched_getparam __NR_sched_getparam
387 _syscall2(int, sys_sched_getparam, pid_t, pid,
388           struct sched_param *, param);
389 #define __NR_sys_sched_setparam __NR_sched_setparam
390 _syscall2(int, sys_sched_setparam, pid_t, pid,
391           const struct sched_param *, param);
392 #define __NR_sys_getcpu __NR_getcpu
393 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
394 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
395           void *, arg);
396 _syscall2(int, capget, struct __user_cap_header_struct *, header,
397           struct __user_cap_data_struct *, data);
398 _syscall2(int, capset, struct __user_cap_header_struct *, header,
399           struct __user_cap_data_struct *, data);
400 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
401 _syscall2(int, ioprio_get, int, which, int, who)
402 #endif
403 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
404 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
405 #endif
406 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
407 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
408 #endif
409 
410 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
411 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
412           unsigned long, idx1, unsigned long, idx2)
413 #endif
414 
415 /*
416  * It is assumed that struct statx is architecture independent.
417  */
418 #if defined(TARGET_NR_statx) && defined(__NR_statx)
419 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
420           unsigned int, mask, struct target_statx *, statxbuf)
421 #endif
422 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
423 _syscall2(int, membarrier, int, cmd, int, flags)
424 #endif
425 
426 static const bitmask_transtbl fcntl_flags_tbl[] = {
427   { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
428   { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
429   { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
430   { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
431   { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
432   { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
433   { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
434   { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
435   { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
436   { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
437   { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
438   { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
439   { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
440 #if defined(O_DIRECT)
441   { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
442 #endif
443 #if defined(O_NOATIME)
444   { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
445 #endif
446 #if defined(O_CLOEXEC)
447   { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
448 #endif
449 #if defined(O_PATH)
450   { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
451 #endif
452 #if defined(O_TMPFILE)
453   { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
454 #endif
455   /* Don't terminate the list prematurely on 64-bit host+guest.  */
456 #if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
457   { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
458 #endif
459 };
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544 
545 static inline int host_to_target_errno(int host_errno)
546 {
547     switch (host_errno) {
548 #define E(X)  case X: return TARGET_##X;
549 #include "errnos.c.inc"
550 #undef E
551     default:
552         return host_errno;
553     }
554 }
555 
556 static inline int target_to_host_errno(int target_errno)
557 {
558     switch (target_errno) {
559 #define E(X)  case TARGET_##X: return X;
560 #include "errnos.c.inc"
561 #undef E
562     default:
563         return target_errno;
564     }
565 }
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 /*
606  * Copies a target struct to a host struct, in a way that guarantees
607  * backwards-compatibility for struct syscall arguments.
608  *
609  * Similar to kernels uaccess.h:copy_struct_from_user()
610  */
611 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
612 {
613     size_t size = MIN(ksize, usize);
614     size_t rest = MAX(ksize, usize) - size;
615 
616     /* Deal with trailing bytes. */
617     if (usize < ksize) {
618         memset(dst + size, 0, rest);
619     } else if (usize > ksize) {
620         int ret = check_zeroed_user(src, ksize, usize);
621         if (ret <= 0) {
622             return ret ?: -TARGET_E2BIG;
623         }
624     }
625     /* Copy the interoperable parts of the struct. */
626     if (copy_from_user(dst, src, size)) {
627         return -TARGET_EFAULT;
628     }
629     return 0;
630 }
631 
632 #define safe_syscall0(type, name) \
633 static type safe_##name(void) \
634 { \
635     return safe_syscall(__NR_##name); \
636 }
637 
638 #define safe_syscall1(type, name, type1, arg1) \
639 static type safe_##name(type1 arg1) \
640 { \
641     return safe_syscall(__NR_##name, arg1); \
642 }
643 
644 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
645 static type safe_##name(type1 arg1, type2 arg2) \
646 { \
647     return safe_syscall(__NR_##name, arg1, arg2); \
648 }
649 
650 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
651 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
654 }
655 
656 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
657     type4, arg4) \
658 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
659 { \
660     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
661 }
662 
663 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
664     type4, arg4, type5, arg5) \
665 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
666     type5 arg5) \
667 { \
668     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
669 }
670 
671 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
672     type4, arg4, type5, arg5, type6, arg6) \
673 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
674     type5 arg5, type6 arg6) \
675 { \
676     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
677 }
678 
679 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
680 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
681 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
682               int, flags, mode_t, mode)
683 
684 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
685               const struct open_how_ver0 *, how, size_t, size)
686 
687 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
688 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
689               struct rusage *, rusage)
690 #endif
691 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
692               int, options, struct rusage *, rusage)
693 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
694 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
695               char **, argv, char **, envp, int, flags)
696 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
697     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
698 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
699               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
700 #endif
701 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
702 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
703               struct timespec *, tsp, const sigset_t *, sigmask,
704               size_t, sigsetsize)
705 #endif
706 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
707               int, maxevents, int, timeout, const sigset_t *, sigmask,
708               size_t, sigsetsize)
709 #if defined(__NR_futex)
710 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
711               const struct timespec *,timeout,int *,uaddr2,int,val3)
712 #endif
713 #if defined(__NR_futex_time64)
714 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
715               const struct timespec *,timeout,int *,uaddr2,int,val3)
716 #endif
717 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
718 safe_syscall2(int, kill, pid_t, pid, int, sig)
719 safe_syscall2(int, tkill, int, tid, int, sig)
720 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
721 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
722 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
723 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
724               unsigned long, pos_l, unsigned long, pos_h)
725 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
726               unsigned long, pos_l, unsigned long, pos_h)
727 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
728               socklen_t, addrlen)
729 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
730               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
731 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
732               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
733 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
734 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
735 safe_syscall2(int, flock, int, fd, int, operation)
736 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
737 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
738               const struct timespec *, uts, size_t, sigsetsize)
739 #endif
740 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
741               int, flags)
742 #if defined(TARGET_NR_nanosleep)
743 safe_syscall2(int, nanosleep, const struct timespec *, req,
744               struct timespec *, rem)
745 #endif
746 #if defined(TARGET_NR_clock_nanosleep) || \
747     defined(TARGET_NR_clock_nanosleep_time64)
748 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
749               const struct timespec *, req, struct timespec *, rem)
750 #endif
751 #ifdef __NR_ipc
752 #ifdef __s390x__
753 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
754               void *, ptr)
755 #else
756 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
757               void *, ptr, long, fifth)
758 #endif
759 #endif
760 #ifdef __NR_msgsnd
761 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
762               int, flags)
763 #endif
764 #ifdef __NR_msgrcv
765 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
766               long, msgtype, int, flags)
767 #endif
768 #ifdef __NR_semtimedop
769 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
770               unsigned, nsops, const struct timespec *, timeout)
771 #endif
772 #if defined(TARGET_NR_mq_timedsend) || \
773     defined(TARGET_NR_mq_timedsend_time64)
774 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
775               size_t, len, unsigned, prio, const struct timespec *, timeout)
776 #endif
777 #if defined(TARGET_NR_mq_timedreceive) || \
778     defined(TARGET_NR_mq_timedreceive_time64)
779 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
780               size_t, len, unsigned *, prio, const struct timespec *, timeout)
781 #endif
782 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
783 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
784               int, outfd, loff_t *, poutoff, size_t, length,
785               unsigned int, flags)
786 #endif
787 
788 /* We do ioctl like this rather than via safe_syscall3 to preserve the
789  * "third argument might be integer or pointer or not present" behaviour of
790  * the libc function.
791  */
792 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
793 /* Similarly for fcntl. Since we always build with LFS enabled,
794  * we should be using the 64-bit structures automatically.
795  */
796 #ifdef __NR_fcntl64
797 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
798 #else
799 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
800 #endif
801 
802 static inline int host_to_target_sock_type(int host_type)
803 {
804     int target_type;
805 
806     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
807     case SOCK_DGRAM:
808         target_type = TARGET_SOCK_DGRAM;
809         break;
810     case SOCK_STREAM:
811         target_type = TARGET_SOCK_STREAM;
812         break;
813     default:
814         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
815         break;
816     }
817 
818 #if defined(SOCK_CLOEXEC)
819     if (host_type & SOCK_CLOEXEC) {
820         target_type |= TARGET_SOCK_CLOEXEC;
821     }
822 #endif
823 
824 #if defined(SOCK_NONBLOCK)
825     if (host_type & SOCK_NONBLOCK) {
826         target_type |= TARGET_SOCK_NONBLOCK;
827     }
828 #endif
829 
830     return target_type;
831 }
832 
833 static abi_ulong target_brk, initial_target_brk;
834 
835 void target_set_brk(abi_ulong new_brk)
836 {
837     target_brk = TARGET_PAGE_ALIGN(new_brk);
838     initial_target_brk = target_brk;
839 }
840 
841 /* do_brk() must return target values and target errnos. */
842 abi_long do_brk(abi_ulong brk_val)
843 {
844     abi_long mapped_addr;
845     abi_ulong new_brk;
846     abi_ulong old_brk;
847 
848     /* brk pointers are always untagged */
849 
850     /* do not allow to shrink below initial brk value */
851     if (brk_val < initial_target_brk) {
852         return target_brk;
853     }
854 
855     new_brk = TARGET_PAGE_ALIGN(brk_val);
856     old_brk = TARGET_PAGE_ALIGN(target_brk);
857 
858     /* new and old target_brk might be on the same page */
859     if (new_brk == old_brk) {
860         target_brk = brk_val;
861         return target_brk;
862     }
863 
864     /* Release heap if necessary */
865     if (new_brk < old_brk) {
866         target_munmap(new_brk, old_brk - new_brk);
867 
868         target_brk = brk_val;
869         return target_brk;
870     }
871 
872     mapped_addr = target_mmap(old_brk, new_brk - old_brk,
873                               PROT_READ | PROT_WRITE,
874                               MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
875                               -1, 0);
876 
877     if (mapped_addr == old_brk) {
878         target_brk = brk_val;
879         return target_brk;
880     }
881 
882 #if defined(TARGET_ALPHA)
883     /* We (partially) emulate OSF/1 on Alpha, which requires we
884        return a proper errno, not an unchanged brk value.  */
885     return -TARGET_ENOMEM;
886 #endif
887     /* For everything else, return the previous break. */
888     return target_brk;
889 }
890 
891 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
892     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
893 static inline abi_long copy_from_user_fdset(fd_set *fds,
894                                             abi_ulong target_fds_addr,
895                                             int n)
896 {
897     int i, nw, j, k;
898     abi_ulong b, *target_fds;
899 
900     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
901     if (!(target_fds = lock_user(VERIFY_READ,
902                                  target_fds_addr,
903                                  sizeof(abi_ulong) * nw,
904                                  1)))
905         return -TARGET_EFAULT;
906 
907     FD_ZERO(fds);
908     k = 0;
909     for (i = 0; i < nw; i++) {
910         /* grab the abi_ulong */
911         __get_user(b, &target_fds[i]);
912         for (j = 0; j < TARGET_ABI_BITS; j++) {
913             /* check the bit inside the abi_ulong */
914             if ((b >> j) & 1)
915                 FD_SET(k, fds);
916             k++;
917         }
918     }
919 
920     unlock_user(target_fds, target_fds_addr, 0);
921 
922     return 0;
923 }
924 
925 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
926                                                  abi_ulong target_fds_addr,
927                                                  int n)
928 {
929     if (target_fds_addr) {
930         if (copy_from_user_fdset(fds, target_fds_addr, n))
931             return -TARGET_EFAULT;
932         *fds_ptr = fds;
933     } else {
934         *fds_ptr = NULL;
935     }
936     return 0;
937 }
938 
939 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
940                                           const fd_set *fds,
941                                           int n)
942 {
943     int i, nw, j, k;
944     abi_long v;
945     abi_ulong *target_fds;
946 
947     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
948     if (!(target_fds = lock_user(VERIFY_WRITE,
949                                  target_fds_addr,
950                                  sizeof(abi_ulong) * nw,
951                                  0)))
952         return -TARGET_EFAULT;
953 
954     k = 0;
955     for (i = 0; i < nw; i++) {
956         v = 0;
957         for (j = 0; j < TARGET_ABI_BITS; j++) {
958             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
959             k++;
960         }
961         __put_user(v, &target_fds[i]);
962     }
963 
964     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
965 
966     return 0;
967 }
968 #endif
969 
970 #if defined(__alpha__)
971 #define HOST_HZ 1024
972 #else
973 #define HOST_HZ 100
974 #endif
975 
976 static inline abi_long host_to_target_clock_t(long ticks)
977 {
978 #if HOST_HZ == TARGET_HZ
979     return ticks;
980 #else
981     return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
982 #endif
983 }
984 
985 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
986                                              const struct rusage *rusage)
987 {
988     struct target_rusage *target_rusage;
989 
990     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
991         return -TARGET_EFAULT;
992     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
993     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
994     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
995     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
996     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
997     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
998     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
999     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1000     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1001     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1002     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1003     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1004     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1005     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1006     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1007     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1008     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1009     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1010     unlock_user_struct(target_rusage, target_addr, 1);
1011 
1012     return 0;
1013 }
1014 
1015 #ifdef TARGET_NR_setrlimit
1016 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1017 {
1018     abi_ulong target_rlim_swap;
1019     rlim_t result;
1020 
1021     target_rlim_swap = tswapal(target_rlim);
1022     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1023         return RLIM_INFINITY;
1024 
1025     result = target_rlim_swap;
1026     if (target_rlim_swap != (rlim_t)result)
1027         return RLIM_INFINITY;
1028 
1029     return result;
1030 }
1031 #endif
1032 
1033 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1034 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1035 {
1036     abi_ulong target_rlim_swap;
1037     abi_ulong result;
1038 
1039     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1040         target_rlim_swap = TARGET_RLIM_INFINITY;
1041     else
1042         target_rlim_swap = rlim;
1043     result = tswapal(target_rlim_swap);
1044 
1045     return result;
1046 }
1047 #endif
1048 
1049 static inline int target_to_host_resource(int code)
1050 {
1051     switch (code) {
1052     case TARGET_RLIMIT_AS:
1053         return RLIMIT_AS;
1054     case TARGET_RLIMIT_CORE:
1055         return RLIMIT_CORE;
1056     case TARGET_RLIMIT_CPU:
1057         return RLIMIT_CPU;
1058     case TARGET_RLIMIT_DATA:
1059         return RLIMIT_DATA;
1060     case TARGET_RLIMIT_FSIZE:
1061         return RLIMIT_FSIZE;
1062     case TARGET_RLIMIT_LOCKS:
1063         return RLIMIT_LOCKS;
1064     case TARGET_RLIMIT_MEMLOCK:
1065         return RLIMIT_MEMLOCK;
1066     case TARGET_RLIMIT_MSGQUEUE:
1067         return RLIMIT_MSGQUEUE;
1068     case TARGET_RLIMIT_NICE:
1069         return RLIMIT_NICE;
1070     case TARGET_RLIMIT_NOFILE:
1071         return RLIMIT_NOFILE;
1072     case TARGET_RLIMIT_NPROC:
1073         return RLIMIT_NPROC;
1074     case TARGET_RLIMIT_RSS:
1075         return RLIMIT_RSS;
1076     case TARGET_RLIMIT_RTPRIO:
1077         return RLIMIT_RTPRIO;
1078 #ifdef RLIMIT_RTTIME
1079     case TARGET_RLIMIT_RTTIME:
1080         return RLIMIT_RTTIME;
1081 #endif
1082     case TARGET_RLIMIT_SIGPENDING:
1083         return RLIMIT_SIGPENDING;
1084     case TARGET_RLIMIT_STACK:
1085         return RLIMIT_STACK;
1086     default:
1087         return code;
1088     }
1089 }
1090 
1091 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1092                                               abi_ulong target_tv_addr)
1093 {
1094     struct target_timeval *target_tv;
1095 
1096     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1097         return -TARGET_EFAULT;
1098     }
1099 
1100     __get_user(tv->tv_sec, &target_tv->tv_sec);
1101     __get_user(tv->tv_usec, &target_tv->tv_usec);
1102 
1103     unlock_user_struct(target_tv, target_tv_addr, 0);
1104 
1105     return 0;
1106 }
1107 
1108 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1109                                             const struct timeval *tv)
1110 {
1111     struct target_timeval *target_tv;
1112 
1113     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1114         return -TARGET_EFAULT;
1115     }
1116 
1117     __put_user(tv->tv_sec, &target_tv->tv_sec);
1118     __put_user(tv->tv_usec, &target_tv->tv_usec);
1119 
1120     unlock_user_struct(target_tv, target_tv_addr, 1);
1121 
1122     return 0;
1123 }
1124 
1125 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1126 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1127                                                 abi_ulong target_tv_addr)
1128 {
1129     struct target__kernel_sock_timeval *target_tv;
1130 
1131     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1132         return -TARGET_EFAULT;
1133     }
1134 
1135     __get_user(tv->tv_sec, &target_tv->tv_sec);
1136     __get_user(tv->tv_usec, &target_tv->tv_usec);
1137 
1138     unlock_user_struct(target_tv, target_tv_addr, 0);
1139 
1140     return 0;
1141 }
1142 #endif
1143 
1144 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1145                                               const struct timeval *tv)
1146 {
1147     struct target__kernel_sock_timeval *target_tv;
1148 
1149     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1150         return -TARGET_EFAULT;
1151     }
1152 
1153     __put_user(tv->tv_sec, &target_tv->tv_sec);
1154     __put_user(tv->tv_usec, &target_tv->tv_usec);
1155 
1156     unlock_user_struct(target_tv, target_tv_addr, 1);
1157 
1158     return 0;
1159 }
1160 
1161 #if defined(TARGET_NR_futex) || \
1162     defined(TARGET_NR_rt_sigtimedwait) || \
1163     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1164     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1165     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1166     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1167     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1168     defined(TARGET_NR_timer_settime) || \
1169     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1170 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1171                                                abi_ulong target_addr)
1172 {
1173     struct target_timespec *target_ts;
1174 
1175     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1176         return -TARGET_EFAULT;
1177     }
1178     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1179     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1180     unlock_user_struct(target_ts, target_addr, 0);
1181     return 0;
1182 }
1183 #endif
1184 
1185 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1186     defined(TARGET_NR_timer_settime64) || \
1187     defined(TARGET_NR_mq_timedsend_time64) || \
1188     defined(TARGET_NR_mq_timedreceive_time64) || \
1189     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1190     defined(TARGET_NR_clock_nanosleep_time64) || \
1191     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1192     defined(TARGET_NR_utimensat) || \
1193     defined(TARGET_NR_utimensat_time64) || \
1194     defined(TARGET_NR_semtimedop_time64) || \
1195     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
1196 static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
1197                                                  abi_ulong target_addr)
1198 {
1199     struct target__kernel_timespec *target_ts;
1200 
1201     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1202         return -TARGET_EFAULT;
1203     }
1204     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1205     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1206     /* in 32bit mode, this drops the padding */
1207     host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
1208     unlock_user_struct(target_ts, target_addr, 0);
1209     return 0;
1210 }
1211 #endif
1212 
1213 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1214                                                struct timespec *host_ts)
1215 {
1216     struct target_timespec *target_ts;
1217 
1218     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1219         return -TARGET_EFAULT;
1220     }
1221     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1222     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1223     unlock_user_struct(target_ts, target_addr, 1);
1224     return 0;
1225 }
1226 
1227 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1228                                                  struct timespec *host_ts)
1229 {
1230     struct target__kernel_timespec *target_ts;
1231 
1232     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1233         return -TARGET_EFAULT;
1234     }
1235     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1236     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1237     unlock_user_struct(target_ts, target_addr, 1);
1238     return 0;
1239 }
1240 
1241 #if defined(TARGET_NR_gettimeofday)
1242 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1243                                              struct timezone *tz)
1244 {
1245     struct target_timezone *target_tz;
1246 
1247     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1248         return -TARGET_EFAULT;
1249     }
1250 
1251     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1252     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1253 
1254     unlock_user_struct(target_tz, target_tz_addr, 1);
1255 
1256     return 0;
1257 }
1258 #endif
1259 
1260 #if defined(TARGET_NR_settimeofday)
1261 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1262                                                abi_ulong target_tz_addr)
1263 {
1264     struct target_timezone *target_tz;
1265 
1266     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1267         return -TARGET_EFAULT;
1268     }
1269 
1270     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1271     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1272 
1273     unlock_user_struct(target_tz, target_tz_addr, 0);
1274 
1275     return 0;
1276 }
1277 #endif
1278 
1279 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1280 #include <mqueue.h>
1281 
1282 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1283                                               abi_ulong target_mq_attr_addr)
1284 {
1285     struct target_mq_attr *target_mq_attr;
1286 
1287     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1288                           target_mq_attr_addr, 1))
1289         return -TARGET_EFAULT;
1290 
1291     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1292     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1293     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1294     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1295 
1296     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1297 
1298     return 0;
1299 }
1300 
1301 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1302                                             const struct mq_attr *attr)
1303 {
1304     struct target_mq_attr *target_mq_attr;
1305 
1306     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1307                           target_mq_attr_addr, 0))
1308         return -TARGET_EFAULT;
1309 
1310     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1311     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1312     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1313     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1314 
1315     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1316 
1317     return 0;
1318 }
1319 #endif
1320 
1321 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1322 /* do_select() must return target values and target errnos. */
1323 static abi_long do_select(int n,
1324                           abi_ulong rfd_addr, abi_ulong wfd_addr,
1325                           abi_ulong efd_addr, abi_ulong target_tv_addr)
1326 {
1327     fd_set rfds, wfds, efds;
1328     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1329     struct timeval tv;
1330     struct timespec ts, *ts_ptr;
1331     abi_long ret;
1332 
1333     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1334     if (ret) {
1335         return ret;
1336     }
1337     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1338     if (ret) {
1339         return ret;
1340     }
1341     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1342     if (ret) {
1343         return ret;
1344     }
1345 
1346     if (target_tv_addr) {
1347         if (copy_from_user_timeval(&tv, target_tv_addr))
1348             return -TARGET_EFAULT;
1349         ts.tv_sec = tv.tv_sec;
1350         ts.tv_nsec = tv.tv_usec * 1000;
1351         ts_ptr = &ts;
1352     } else {
1353         ts_ptr = NULL;
1354     }
1355 
1356     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1357                                   ts_ptr, NULL));
1358 
1359     if (!is_error(ret)) {
1360         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
1361             return -TARGET_EFAULT;
1362         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
1363             return -TARGET_EFAULT;
1364         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
1365             return -TARGET_EFAULT;
1366 
1367         if (target_tv_addr) {
1368             tv.tv_sec = ts.tv_sec;
1369             tv.tv_usec = ts.tv_nsec / 1000;
1370             if (copy_to_user_timeval(target_tv_addr, &tv)) {
1371                 return -TARGET_EFAULT;
1372             }
1373         }
1374     }
1375 
1376     return ret;
1377 }
1378 
1379 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1380 static abi_long do_old_select(abi_ulong arg1)
1381 {
1382     struct target_sel_arg_struct *sel;
1383     abi_ulong inp, outp, exp, tvp;
1384     long nsel;
1385 
1386     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1387         return -TARGET_EFAULT;
1388     }
1389 
1390     nsel = tswapal(sel->n);
1391     inp = tswapal(sel->inp);
1392     outp = tswapal(sel->outp);
1393     exp = tswapal(sel->exp);
1394     tvp = tswapal(sel->tvp);
1395 
1396     unlock_user_struct(sel, arg1, 0);
1397 
1398     return do_select(nsel, inp, outp, exp, tvp);
1399 }
1400 #endif
1401 #endif
1402 
1403 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1404 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1405                             abi_long arg4, abi_long arg5, abi_long arg6,
1406                             bool time64)
1407 {
1408     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1409     fd_set rfds, wfds, efds;
1410     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1411     struct timespec ts, *ts_ptr;
1412     abi_long ret;
1413 
1414     /*
1415      * The 6th arg is actually two args smashed together,
1416      * so we cannot use the C library.
1417      */
1418     struct {
1419         sigset_t *set;
1420         size_t size;
1421     } sig, *sig_ptr;
1422 
1423     abi_ulong arg_sigset, arg_sigsize, *arg7;
1424 
1425     n = arg1;
1426     rfd_addr = arg2;
1427     wfd_addr = arg3;
1428     efd_addr = arg4;
1429     ts_addr = arg5;
1430 
1431     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1432     if (ret) {
1433         return ret;
1434     }
1435     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1436     if (ret) {
1437         return ret;
1438     }
1439     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1440     if (ret) {
1441         return ret;
1442     }
1443 
1444     /*
1445      * This takes a timespec, and not a timeval, so we cannot
1446      * use the do_select() helper ...
1447      */
1448     if (ts_addr) {
1449         if (time64) {
1450             if (target_to_host_timespec64(&ts, ts_addr)) {
1451                 return -TARGET_EFAULT;
1452             }
1453         } else {
1454             if (target_to_host_timespec(&ts, ts_addr)) {
1455                 return -TARGET_EFAULT;
1456             }
1457         }
1458             ts_ptr = &ts;
1459     } else {
1460         ts_ptr = NULL;
1461     }
1462 
1463     /* Extract the two packed args for the sigset */
1464     sig_ptr = NULL;
1465     if (arg6) {
1466         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1467         if (!arg7) {
1468             return -TARGET_EFAULT;
1469         }
1470         arg_sigset = tswapal(arg7[0]);
1471         arg_sigsize = tswapal(arg7[1]);
1472         unlock_user(arg7, arg6, 0);
1473 
1474         if (arg_sigset) {
1475             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1476             if (ret != 0) {
1477                 return ret;
1478             }
1479             sig_ptr = &sig;
1480             sig.size = SIGSET_T_SIZE;
1481         }
1482     }
1483 
1484     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1485                                   ts_ptr, sig_ptr));
1486 
1487     if (sig_ptr) {
1488         finish_sigsuspend_mask(ret);
1489     }
1490 
1491     if (!is_error(ret)) {
1492         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1493             return -TARGET_EFAULT;
1494         }
1495         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1496             return -TARGET_EFAULT;
1497         }
1498         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1499             return -TARGET_EFAULT;
1500         }
1501         if (time64) {
1502             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1503                 return -TARGET_EFAULT;
1504             }
1505         } else {
1506             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1507                 return -TARGET_EFAULT;
1508             }
1509         }
1510     }
1511     return ret;
1512 }
1513 #endif
1514 
1515 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1516     defined(TARGET_NR_ppoll_time64)
1517 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1518                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1519 {
1520     struct target_pollfd *target_pfd;
1521     unsigned int nfds = arg2;
1522     struct pollfd *pfd;
1523     unsigned int i;
1524     abi_long ret;
1525 
1526     pfd = NULL;
1527     target_pfd = NULL;
1528     if (nfds) {
1529         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1530             return -TARGET_EINVAL;
1531         }
1532         target_pfd = lock_user(VERIFY_WRITE, arg1,
1533                                sizeof(struct target_pollfd) * nfds, 1);
1534         if (!target_pfd) {
1535             return -TARGET_EFAULT;
1536         }
1537 
1538         pfd = alloca(sizeof(struct pollfd) * nfds);
1539         for (i = 0; i < nfds; i++) {
1540             pfd[i].fd = tswap32(target_pfd[i].fd);
1541             pfd[i].events = tswap16(target_pfd[i].events);
1542         }
1543     }
1544     if (ppoll) {
1545         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1546         sigset_t *set = NULL;
1547 
1548         if (arg3) {
1549             if (time64) {
1550                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1551                     unlock_user(target_pfd, arg1, 0);
1552                     return -TARGET_EFAULT;
1553                 }
1554             } else {
1555                 if (target_to_host_timespec(timeout_ts, arg3)) {
1556                     unlock_user(target_pfd, arg1, 0);
1557                     return -TARGET_EFAULT;
1558                 }
1559             }
1560         } else {
1561             timeout_ts = NULL;
1562         }
1563 
1564         if (arg4) {
1565             ret = process_sigsuspend_mask(&set, arg4, arg5);
1566             if (ret != 0) {
1567                 unlock_user(target_pfd, arg1, 0);
1568                 return ret;
1569             }
1570         }
1571 
1572         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1573                                    set, SIGSET_T_SIZE));
1574 
1575         if (set) {
1576             finish_sigsuspend_mask(ret);
1577         }
1578         if (!is_error(ret) && arg3) {
1579             if (time64) {
1580                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1581                     return -TARGET_EFAULT;
1582                 }
1583             } else {
1584                 if (host_to_target_timespec(arg3, timeout_ts)) {
1585                     return -TARGET_EFAULT;
1586                 }
1587             }
1588         }
1589     } else {
1590           struct timespec ts, *pts;
1591 
1592           if (arg3 >= 0) {
1593               /* Convert ms to secs, ns */
1594               ts.tv_sec = arg3 / 1000;
1595               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1596               pts = &ts;
1597           } else {
1598               /* -ve poll() timeout means "infinite" */
1599               pts = NULL;
1600           }
1601           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1602     }
1603 
1604     if (!is_error(ret)) {
1605         for (i = 0; i < nfds; i++) {
1606             target_pfd[i].revents = tswap16(pfd[i].revents);
1607         }
1608     }
1609     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1610     return ret;
1611 }
1612 #endif
1613 
1614 static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
1615                         int flags, int is_pipe2)
1616 {
1617     int host_pipe[2];
1618     abi_long ret;
1619     ret = pipe2(host_pipe, flags);
1620 
1621     if (is_error(ret))
1622         return get_errno(ret);
1623 
1624     /* Several targets have special calling conventions for the original
1625        pipe syscall, but didn't replicate this into the pipe2 syscall.  */
1626     if (!is_pipe2) {
1627 #if defined(TARGET_ALPHA)
1628         cpu_env->ir[IR_A4] = host_pipe[1];
1629         return host_pipe[0];
1630 #elif defined(TARGET_MIPS)
1631         cpu_env->active_tc.gpr[3] = host_pipe[1];
1632         return host_pipe[0];
1633 #elif defined(TARGET_SH4)
1634         cpu_env->gregs[1] = host_pipe[1];
1635         return host_pipe[0];
1636 #elif defined(TARGET_SPARC)
1637         cpu_env->regwptr[1] = host_pipe[1];
1638         return host_pipe[0];
1639 #endif
1640     }
1641 
1642     if (put_user_s32(host_pipe[0], pipedes)
1643         || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
1644         return -TARGET_EFAULT;
1645     return get_errno(ret);
1646 }
1647 
1648 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1649                                                abi_ulong target_addr,
1650                                                socklen_t len)
1651 {
1652     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1653     sa_family_t sa_family;
1654     struct target_sockaddr *target_saddr;
1655 
1656     if (fd_trans_target_to_host_addr(fd)) {
1657         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1658     }
1659 
1660     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_saddr)
1662         return -TARGET_EFAULT;
1663 
1664     sa_family = tswap16(target_saddr->sa_family);
1665 
1666     /* Oops. The caller might send a incomplete sun_path; sun_path
1667      * must be terminated by \0 (see the manual page), but
1668      * unfortunately it is quite common to specify sockaddr_un
1669      * length as "strlen(x->sun_path)" while it should be
1670      * "strlen(...) + 1". We'll fix that here if needed.
1671      * Linux kernel has a similar feature.
1672      */
1673 
1674     if (sa_family == AF_UNIX) {
1675         if (len < unix_maxlen && len > 0) {
1676             char *cp = (char*)target_saddr;
1677 
1678             if ( cp[len-1] && !cp[len] )
1679                 len++;
1680         }
1681         if (len > unix_maxlen)
1682             len = unix_maxlen;
1683     }
1684 
1685     memcpy(addr, target_saddr, len);
1686     addr->sa_family = sa_family;
1687     if (sa_family == AF_NETLINK) {
1688         struct sockaddr_nl *nladdr;
1689 
1690         nladdr = (struct sockaddr_nl *)addr;
1691         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1692         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1693     } else if (sa_family == AF_PACKET) {
1694 	struct target_sockaddr_ll *lladdr;
1695 
1696 	lladdr = (struct target_sockaddr_ll *)addr;
1697 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1698 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1699     } else if (sa_family == AF_INET6) {
1700         struct sockaddr_in6 *in6addr;
1701 
1702         in6addr = (struct sockaddr_in6 *)addr;
1703         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1704     }
1705     unlock_user(target_saddr, target_addr, 0);
1706 
1707     return 0;
1708 }
1709 
1710 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1711                                                struct sockaddr *addr,
1712                                                socklen_t len)
1713 {
1714     struct target_sockaddr *target_saddr;
1715 
1716     if (len == 0) {
1717         return 0;
1718     }
1719     assert(addr);
1720 
1721     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1722     if (!target_saddr)
1723         return -TARGET_EFAULT;
1724     memcpy(target_saddr, addr, len);
1725     if (len >= offsetof(struct target_sockaddr, sa_family) +
1726         sizeof(target_saddr->sa_family)) {
1727         target_saddr->sa_family = tswap16(addr->sa_family);
1728     }
1729     if (addr->sa_family == AF_NETLINK &&
1730         len >= sizeof(struct target_sockaddr_nl)) {
1731         struct target_sockaddr_nl *target_nl =
1732                (struct target_sockaddr_nl *)target_saddr;
1733         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1734         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1735     } else if (addr->sa_family == AF_PACKET) {
1736         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1737         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1738         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1739     } else if (addr->sa_family == AF_INET6 &&
1740                len >= sizeof(struct target_sockaddr_in6)) {
1741         struct target_sockaddr_in6 *target_in6 =
1742                (struct target_sockaddr_in6 *)target_saddr;
1743         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1744     }
1745     unlock_user(target_saddr, target_addr, len);
1746 
1747     return 0;
1748 }
1749 
1750 static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
1751                                            struct target_msghdr *target_msgh)
1752 {
1753     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1754     abi_long msg_controllen;
1755     abi_ulong target_cmsg_addr;
1756     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1757     socklen_t space = 0;
1758 
1759     msg_controllen = tswapal(target_msgh->msg_controllen);
1760     if (msg_controllen < sizeof (struct target_cmsghdr))
1761         goto the_end;
1762     target_cmsg_addr = tswapal(target_msgh->msg_control);
1763     target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
1764     target_cmsg_start = target_cmsg;
1765     if (!target_cmsg)
1766         return -TARGET_EFAULT;
1767 
1768     while (cmsg && target_cmsg) {
1769         void *data = CMSG_DATA(cmsg);
1770         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1771 
1772         int len = tswapal(target_cmsg->cmsg_len)
1773             - sizeof(struct target_cmsghdr);
1774 
1775         space += CMSG_SPACE(len);
1776         if (space > msgh->msg_controllen) {
1777             space -= CMSG_SPACE(len);
1778             /* This is a QEMU bug, since we allocated the payload
1779              * area ourselves (unlike overflow in host-to-target
1780              * conversion, which is just the guest giving us a buffer
1781              * that's too small). It can't happen for the payload types
1782              * we currently support; if it becomes an issue in future
1783              * we would need to improve our allocation strategy to
1784              * something more intelligent than "twice the size of the
1785              * target buffer we're reading from".
1786              */
1787             qemu_log_mask(LOG_UNIMP,
1788                           ("Unsupported ancillary data %d/%d: "
1789                            "unhandled msg size\n"),
1790                           tswap32(target_cmsg->cmsg_level),
1791                           tswap32(target_cmsg->cmsg_type));
1792             break;
1793         }
1794 
1795         if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
1796             cmsg->cmsg_level = SOL_SOCKET;
1797         } else {
1798             cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
1799         }
1800         cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
1801         cmsg->cmsg_len = CMSG_LEN(len);
1802 
1803         if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
1804             int *fd = (int *)data;
1805             int *target_fd = (int *)target_data;
1806             int i, numfds = len / sizeof(int);
1807 
1808             for (i = 0; i < numfds; i++) {
1809                 __get_user(fd[i], target_fd + i);
1810             }
1811         } else if (cmsg->cmsg_level == SOL_SOCKET
1812                &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
1813             struct ucred *cred = (struct ucred *)data;
1814             struct target_ucred *target_cred =
1815                 (struct target_ucred *)target_data;
1816 
1817             __get_user(cred->pid, &target_cred->pid);
1818             __get_user(cred->uid, &target_cred->uid);
1819             __get_user(cred->gid, &target_cred->gid);
1820         } else if (cmsg->cmsg_level == SOL_ALG) {
1821             uint32_t *dst = (uint32_t *)data;
1822 
1823             memcpy(dst, target_data, len);
1824             /* fix endianness of first 32-bit word */
1825             if (len >= sizeof(uint32_t)) {
1826                 *dst = tswap32(*dst);
1827             }
1828         } else {
1829             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
1830                           cmsg->cmsg_level, cmsg->cmsg_type);
1831             memcpy(data, target_data, len);
1832         }
1833 
1834         cmsg = CMSG_NXTHDR(msgh, cmsg);
1835         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
1836                                          target_cmsg_start);
1837     }
1838     unlock_user(target_cmsg, target_cmsg_addr, 0);
1839  the_end:
1840     msgh->msg_controllen = space;
1841     return 0;
1842 }
1843 
1844 static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
1845                                            struct msghdr *msgh)
1846 {
1847     struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
1848     abi_long msg_controllen;
1849     abi_ulong target_cmsg_addr;
1850     struct target_cmsghdr *target_cmsg, *target_cmsg_start;
1851     socklen_t space = 0;
1852 
1853     msg_controllen = tswapal(target_msgh->msg_controllen);
1854     if (msg_controllen < sizeof (struct target_cmsghdr))
1855         goto the_end;
1856     target_cmsg_addr = tswapal(target_msgh->msg_control);
1857     target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
1858     target_cmsg_start = target_cmsg;
1859     if (!target_cmsg)
1860         return -TARGET_EFAULT;
1861 
1862     while (cmsg && target_cmsg) {
1863         void *data = CMSG_DATA(cmsg);
1864         void *target_data = TARGET_CMSG_DATA(target_cmsg);
1865 
1866         int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
1867         int tgt_len, tgt_space;
1868 
1869         /* We never copy a half-header but may copy half-data;
1870          * this is Linux's behaviour in put_cmsg(). Note that
1871          * truncation here is a guest problem (which we report
1872          * to the guest via the CTRUNC bit), unlike truncation
1873          * in target_to_host_cmsg, which is a QEMU bug.
1874          */
1875         if (msg_controllen < sizeof(struct target_cmsghdr)) {
1876             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1877             break;
1878         }
1879 
1880         if (cmsg->cmsg_level == SOL_SOCKET) {
1881             target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
1882         } else {
1883             target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
1884         }
1885         target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
1886 
1887         /* Payload types which need a different size of payload on
1888          * the target must adjust tgt_len here.
1889          */
1890         tgt_len = len;
1891         switch (cmsg->cmsg_level) {
1892         case SOL_SOCKET:
1893             switch (cmsg->cmsg_type) {
1894             case SO_TIMESTAMP:
1895                 tgt_len = sizeof(struct target_timeval);
1896                 break;
1897             default:
1898                 break;
1899             }
1900             break;
1901         default:
1902             break;
1903         }
1904 
1905         if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
1906             target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
1907             tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
1908         }
1909 
1910         /* We must now copy-and-convert len bytes of payload
1911          * into tgt_len bytes of destination space. Bear in mind
1912          * that in both source and destination we may be dealing
1913          * with a truncated value!
1914          */
1915         switch (cmsg->cmsg_level) {
1916         case SOL_SOCKET:
1917             switch (cmsg->cmsg_type) {
1918             case SCM_RIGHTS:
1919             {
1920                 int *fd = (int *)data;
1921                 int *target_fd = (int *)target_data;
1922                 int i, numfds = tgt_len / sizeof(int);
1923 
1924                 for (i = 0; i < numfds; i++) {
1925                     __put_user(fd[i], target_fd + i);
1926                 }
1927                 break;
1928             }
1929             case SO_TIMESTAMP:
1930             {
1931                 struct timeval *tv = (struct timeval *)data;
1932                 struct target_timeval *target_tv =
1933                     (struct target_timeval *)target_data;
1934 
1935                 if (len != sizeof(struct timeval) ||
1936                     tgt_len != sizeof(struct target_timeval)) {
1937                     goto unimplemented;
1938                 }
1939 
1940                 /* copy struct timeval to target */
1941                 __put_user(tv->tv_sec, &target_tv->tv_sec);
1942                 __put_user(tv->tv_usec, &target_tv->tv_usec);
1943                 break;
1944             }
1945             case SCM_CREDENTIALS:
1946             {
1947                 struct ucred *cred = (struct ucred *)data;
1948                 struct target_ucred *target_cred =
1949                     (struct target_ucred *)target_data;
1950 
1951                 __put_user(cred->pid, &target_cred->pid);
1952                 __put_user(cred->uid, &target_cred->uid);
1953                 __put_user(cred->gid, &target_cred->gid);
1954                 break;
1955             }
1956             default:
1957                 goto unimplemented;
1958             }
1959             break;
1960 
1961         case SOL_IP:
1962             switch (cmsg->cmsg_type) {
1963             case IP_TTL:
1964             {
1965                 uint32_t *v = (uint32_t *)data;
1966                 uint32_t *t_int = (uint32_t *)target_data;
1967 
1968                 if (len != sizeof(uint32_t) ||
1969                     tgt_len != sizeof(uint32_t)) {
1970                     goto unimplemented;
1971                 }
1972                 __put_user(*v, t_int);
1973                 break;
1974             }
1975             case IP_RECVERR:
1976             {
1977                 struct errhdr_t {
1978                    struct sock_extended_err ee;
1979                    struct sockaddr_in offender;
1980                 };
1981                 struct errhdr_t *errh = (struct errhdr_t *)data;
1982                 struct errhdr_t *target_errh =
1983                     (struct errhdr_t *)target_data;
1984 
1985                 if (len != sizeof(struct errhdr_t) ||
1986                     tgt_len != sizeof(struct errhdr_t)) {
1987                     goto unimplemented;
1988                 }
1989                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
1990                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
1991                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
1992                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
1993                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
1994                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
1995                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
1996                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
1997                     (void *) &errh->offender, sizeof(errh->offender));
1998                 break;
1999             }
2000             default:
2001                 goto unimplemented;
2002             }
2003             break;
2004 
2005         case SOL_IPV6:
2006             switch (cmsg->cmsg_type) {
2007             case IPV6_HOPLIMIT:
2008             {
2009                 uint32_t *v = (uint32_t *)data;
2010                 uint32_t *t_int = (uint32_t *)target_data;
2011 
2012                 if (len != sizeof(uint32_t) ||
2013                     tgt_len != sizeof(uint32_t)) {
2014                     goto unimplemented;
2015                 }
2016                 __put_user(*v, t_int);
2017                 break;
2018             }
2019             case IPV6_RECVERR:
2020             {
2021                 struct errhdr6_t {
2022                    struct sock_extended_err ee;
2023                    struct sockaddr_in6 offender;
2024                 };
2025                 struct errhdr6_t *errh = (struct errhdr6_t *)data;
2026                 struct errhdr6_t *target_errh =
2027                     (struct errhdr6_t *)target_data;
2028 
2029                 if (len != sizeof(struct errhdr6_t) ||
2030                     tgt_len != sizeof(struct errhdr6_t)) {
2031                     goto unimplemented;
2032                 }
2033                 __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
2034                 __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
2035                 __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
2036                 __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
2037                 __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
2038                 __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
2039                 __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
2040                 host_to_target_sockaddr((unsigned long) &target_errh->offender,
2041                     (void *) &errh->offender, sizeof(errh->offender));
2042                 break;
2043             }
2044             default:
2045                 goto unimplemented;
2046             }
2047             break;
2048 
2049         default:
2050         unimplemented:
2051             qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
2052                           cmsg->cmsg_level, cmsg->cmsg_type);
2053             memcpy(target_data, data, MIN(len, tgt_len));
2054             if (tgt_len > len) {
2055                 memset(target_data + len, 0, tgt_len - len);
2056             }
2057         }
2058 
2059         target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
2060         tgt_space = TARGET_CMSG_SPACE(tgt_len);
2061         if (msg_controllen < tgt_space) {
2062             tgt_space = msg_controllen;
2063         }
2064         msg_controllen -= tgt_space;
2065         space += tgt_space;
2066         cmsg = CMSG_NXTHDR(msgh, cmsg);
2067         target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
2068                                          target_cmsg_start);
2069     }
2070     unlock_user(target_cmsg, target_cmsg_addr, space);
2071  the_end:
2072     target_msgh->msg_controllen = tswapal(space);
2073     return 0;
2074 }
2075 
2076 /* do_setsockopt() Must return target values and target errnos. */
2077 static abi_long do_setsockopt(int sockfd, int level, int optname,
2078                               abi_ulong optval_addr, socklen_t optlen)
2079 {
2080     abi_long ret;
2081     int val;
2082 
2083     switch(level) {
2084     case SOL_TCP:
2085     case SOL_UDP:
2086         /* TCP and UDP options all take an 'int' value.  */
2087         if (optlen < sizeof(uint32_t))
2088             return -TARGET_EINVAL;
2089 
2090         if (get_user_u32(val, optval_addr))
2091             return -TARGET_EFAULT;
2092         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2093         break;
2094     case SOL_IP:
2095         switch(optname) {
2096         case IP_TOS:
2097         case IP_TTL:
2098         case IP_HDRINCL:
2099         case IP_ROUTER_ALERT:
2100         case IP_RECVOPTS:
2101         case IP_RETOPTS:
2102         case IP_PKTINFO:
2103         case IP_MTU_DISCOVER:
2104         case IP_RECVERR:
2105         case IP_RECVTTL:
2106         case IP_RECVTOS:
2107 #ifdef IP_FREEBIND
2108         case IP_FREEBIND:
2109 #endif
2110         case IP_MULTICAST_TTL:
2111         case IP_MULTICAST_LOOP:
2112             val = 0;
2113             if (optlen >= sizeof(uint32_t)) {
2114                 if (get_user_u32(val, optval_addr))
2115                     return -TARGET_EFAULT;
2116             } else if (optlen >= 1) {
2117                 if (get_user_u8(val, optval_addr))
2118                     return -TARGET_EFAULT;
2119             }
2120             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2121             break;
2122         case IP_ADD_MEMBERSHIP:
2123         case IP_DROP_MEMBERSHIP:
2124         {
2125             struct ip_mreqn ip_mreq;
2126             struct target_ip_mreqn *target_smreqn;
2127 
2128             QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
2129                               sizeof(struct target_ip_mreq));
2130 
2131             if (optlen < sizeof (struct target_ip_mreq) ||
2132                 optlen > sizeof (struct target_ip_mreqn)) {
2133                 return -TARGET_EINVAL;
2134             }
2135 
2136             target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2137             if (!target_smreqn) {
2138                 return -TARGET_EFAULT;
2139             }
2140             ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
2141             ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
2142             if (optlen == sizeof(struct target_ip_mreqn)) {
2143                 ip_mreq.imr_ifindex = tswapal(target_smreqn->imr_ifindex);
2144                 optlen = sizeof(struct ip_mreqn);
2145             }
2146             unlock_user(target_smreqn, optval_addr, 0);
2147 
2148             ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
2149             break;
2150         }
2151         case IP_BLOCK_SOURCE:
2152         case IP_UNBLOCK_SOURCE:
2153         case IP_ADD_SOURCE_MEMBERSHIP:
2154         case IP_DROP_SOURCE_MEMBERSHIP:
2155         {
2156             struct ip_mreq_source *ip_mreq_source;
2157 
2158             if (optlen != sizeof (struct target_ip_mreq_source))
2159                 return -TARGET_EINVAL;
2160 
2161             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2162             if (!ip_mreq_source) {
2163                 return -TARGET_EFAULT;
2164             }
2165             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2166             unlock_user (ip_mreq_source, optval_addr, 0);
2167             break;
2168         }
2169         default:
2170             goto unimplemented;
2171         }
2172         break;
2173     case SOL_IPV6:
2174         switch (optname) {
2175         case IPV6_MTU_DISCOVER:
2176         case IPV6_MTU:
2177         case IPV6_V6ONLY:
2178         case IPV6_RECVPKTINFO:
2179         case IPV6_UNICAST_HOPS:
2180         case IPV6_MULTICAST_HOPS:
2181         case IPV6_MULTICAST_LOOP:
2182         case IPV6_RECVERR:
2183         case IPV6_RECVHOPLIMIT:
2184         case IPV6_2292HOPLIMIT:
2185         case IPV6_CHECKSUM:
2186         case IPV6_ADDRFORM:
2187         case IPV6_2292PKTINFO:
2188         case IPV6_RECVTCLASS:
2189         case IPV6_RECVRTHDR:
2190         case IPV6_2292RTHDR:
2191         case IPV6_RECVHOPOPTS:
2192         case IPV6_2292HOPOPTS:
2193         case IPV6_RECVDSTOPTS:
2194         case IPV6_2292DSTOPTS:
2195         case IPV6_TCLASS:
2196         case IPV6_ADDR_PREFERENCES:
2197 #ifdef IPV6_RECVPATHMTU
2198         case IPV6_RECVPATHMTU:
2199 #endif
2200 #ifdef IPV6_TRANSPARENT
2201         case IPV6_TRANSPARENT:
2202 #endif
2203 #ifdef IPV6_FREEBIND
2204         case IPV6_FREEBIND:
2205 #endif
2206 #ifdef IPV6_RECVORIGDSTADDR
2207         case IPV6_RECVORIGDSTADDR:
2208 #endif
2209             val = 0;
2210             if (optlen < sizeof(uint32_t)) {
2211                 return -TARGET_EINVAL;
2212             }
2213             if (get_user_u32(val, optval_addr)) {
2214                 return -TARGET_EFAULT;
2215             }
2216             ret = get_errno(setsockopt(sockfd, level, optname,
2217                                        &val, sizeof(val)));
2218             break;
2219         case IPV6_PKTINFO:
2220         {
2221             struct in6_pktinfo pki;
2222 
2223             if (optlen < sizeof(pki)) {
2224                 return -TARGET_EINVAL;
2225             }
2226 
2227             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2228                 return -TARGET_EFAULT;
2229             }
2230 
2231             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2232 
2233             ret = get_errno(setsockopt(sockfd, level, optname,
2234                                        &pki, sizeof(pki)));
2235             break;
2236         }
2237         case IPV6_ADD_MEMBERSHIP:
2238         case IPV6_DROP_MEMBERSHIP:
2239         {
2240             struct ipv6_mreq ipv6mreq;
2241 
2242             if (optlen < sizeof(ipv6mreq)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &ipv6mreq, sizeof(ipv6mreq)));
2254             break;
2255         }
2256         default:
2257             goto unimplemented;
2258         }
2259         break;
2260     case SOL_ICMPV6:
2261         switch (optname) {
2262         case ICMPV6_FILTER:
2263         {
2264             struct icmp6_filter icmp6f;
2265 
2266             if (optlen > sizeof(icmp6f)) {
2267                 optlen = sizeof(icmp6f);
2268             }
2269 
2270             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2271                 return -TARGET_EFAULT;
2272             }
2273 
2274             for (val = 0; val < 8; val++) {
2275                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2276             }
2277 
2278             ret = get_errno(setsockopt(sockfd, level, optname,
2279                                        &icmp6f, optlen));
2280             break;
2281         }
2282         default:
2283             goto unimplemented;
2284         }
2285         break;
2286     case SOL_RAW:
2287         switch (optname) {
2288         case ICMP_FILTER:
2289         case IPV6_CHECKSUM:
2290             /* those take an u32 value */
2291             if (optlen < sizeof(uint32_t)) {
2292                 return -TARGET_EINVAL;
2293             }
2294 
2295             if (get_user_u32(val, optval_addr)) {
2296                 return -TARGET_EFAULT;
2297             }
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        &val, sizeof(val)));
2300             break;
2301 
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2307     case SOL_ALG:
2308         switch (optname) {
2309         case ALG_SET_KEY:
2310         {
2311             char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2312             if (!alg_key) {
2313                 return -TARGET_EFAULT;
2314             }
2315             ret = get_errno(setsockopt(sockfd, level, optname,
2316                                        alg_key, optlen));
2317             unlock_user(alg_key, optval_addr, optlen);
2318             break;
2319         }
2320         case ALG_SET_AEAD_AUTHSIZE:
2321         {
2322             ret = get_errno(setsockopt(sockfd, level, optname,
2323                                        NULL, optlen));
2324             break;
2325         }
2326         default:
2327             goto unimplemented;
2328         }
2329         break;
2330 #endif
2331     case TARGET_SOL_SOCKET:
2332         switch (optname) {
2333         case TARGET_SO_RCVTIMEO:
2334         case TARGET_SO_SNDTIMEO:
2335         {
2336                 struct timeval tv;
2337 
2338                 if (optlen != sizeof(struct target_timeval)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341 
2342                 if (copy_from_user_timeval(&tv, optval_addr)) {
2343                     return -TARGET_EFAULT;
2344                 }
2345 
2346                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2347                                 optname == TARGET_SO_RCVTIMEO ?
2348                                     SO_RCVTIMEO : SO_SNDTIMEO,
2349                                 &tv, sizeof(tv)));
2350                 return ret;
2351         }
2352         case TARGET_SO_ATTACH_FILTER:
2353         {
2354                 struct target_sock_fprog *tfprog;
2355                 struct target_sock_filter *tfilter;
2356                 struct sock_fprog fprog;
2357                 struct sock_filter *filter;
2358                 int i;
2359 
2360                 if (optlen != sizeof(*tfprog)) {
2361                     return -TARGET_EINVAL;
2362                 }
2363                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2364                     return -TARGET_EFAULT;
2365                 }
2366                 if (!lock_user_struct(VERIFY_READ, tfilter,
2367                                       tswapal(tfprog->filter), 0)) {
2368                     unlock_user_struct(tfprog, optval_addr, 1);
2369                     return -TARGET_EFAULT;
2370                 }
2371 
2372                 fprog.len = tswap16(tfprog->len);
2373                 filter = g_try_new(struct sock_filter, fprog.len);
2374                 if (filter == NULL) {
2375                     unlock_user_struct(tfilter, tfprog->filter, 1);
2376                     unlock_user_struct(tfprog, optval_addr, 1);
2377                     return -TARGET_ENOMEM;
2378                 }
2379                 for (i = 0; i < fprog.len; i++) {
2380                     filter[i].code = tswap16(tfilter[i].code);
2381                     filter[i].jt = tfilter[i].jt;
2382                     filter[i].jf = tfilter[i].jf;
2383                     filter[i].k = tswap32(tfilter[i].k);
2384                 }
2385                 fprog.filter = filter;
2386 
2387                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2388                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2389                 g_free(filter);
2390 
2391                 unlock_user_struct(tfilter, tfprog->filter, 1);
2392                 unlock_user_struct(tfprog, optval_addr, 1);
2393                 return ret;
2394         }
2395 	case TARGET_SO_BINDTODEVICE:
2396 	{
2397 		char *dev_ifname, *addr_ifname;
2398 
2399 		if (optlen > IFNAMSIZ - 1) {
2400 		    optlen = IFNAMSIZ - 1;
2401 		}
2402 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2403 		if (!dev_ifname) {
2404 		    return -TARGET_EFAULT;
2405 		}
2406 		optname = SO_BINDTODEVICE;
2407 		addr_ifname = alloca(IFNAMSIZ);
2408 		memcpy(addr_ifname, dev_ifname, optlen);
2409 		addr_ifname[optlen] = 0;
2410 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2411                                            addr_ifname, optlen));
2412 		unlock_user (dev_ifname, optval_addr, 0);
2413 		return ret;
2414 	}
2415         case TARGET_SO_LINGER:
2416         {
2417                 struct linger lg;
2418                 struct target_linger *tlg;
2419 
2420                 if (optlen != sizeof(struct target_linger)) {
2421                     return -TARGET_EINVAL;
2422                 }
2423                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2424                     return -TARGET_EFAULT;
2425                 }
2426                 __get_user(lg.l_onoff, &tlg->l_onoff);
2427                 __get_user(lg.l_linger, &tlg->l_linger);
2428                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2429                                 &lg, sizeof(lg)));
2430                 unlock_user_struct(tlg, optval_addr, 0);
2431                 return ret;
2432         }
2433             /* Options with 'int' argument.  */
2434         case TARGET_SO_DEBUG:
2435 		optname = SO_DEBUG;
2436 		break;
2437         case TARGET_SO_REUSEADDR:
2438 		optname = SO_REUSEADDR;
2439 		break;
2440 #ifdef SO_REUSEPORT
2441         case TARGET_SO_REUSEPORT:
2442                 optname = SO_REUSEPORT;
2443                 break;
2444 #endif
2445         case TARGET_SO_TYPE:
2446 		optname = SO_TYPE;
2447 		break;
2448         case TARGET_SO_ERROR:
2449 		optname = SO_ERROR;
2450 		break;
2451         case TARGET_SO_DONTROUTE:
2452 		optname = SO_DONTROUTE;
2453 		break;
2454         case TARGET_SO_BROADCAST:
2455 		optname = SO_BROADCAST;
2456 		break;
2457         case TARGET_SO_SNDBUF:
2458 		optname = SO_SNDBUF;
2459 		break;
2460         case TARGET_SO_SNDBUFFORCE:
2461                 optname = SO_SNDBUFFORCE;
2462                 break;
2463         case TARGET_SO_RCVBUF:
2464 		optname = SO_RCVBUF;
2465 		break;
2466         case TARGET_SO_RCVBUFFORCE:
2467                 optname = SO_RCVBUFFORCE;
2468                 break;
2469         case TARGET_SO_KEEPALIVE:
2470 		optname = SO_KEEPALIVE;
2471 		break;
2472         case TARGET_SO_OOBINLINE:
2473 		optname = SO_OOBINLINE;
2474 		break;
2475         case TARGET_SO_NO_CHECK:
2476 		optname = SO_NO_CHECK;
2477 		break;
2478         case TARGET_SO_PRIORITY:
2479 		optname = SO_PRIORITY;
2480 		break;
2481 #ifdef SO_BSDCOMPAT
2482         case TARGET_SO_BSDCOMPAT:
2483 		optname = SO_BSDCOMPAT;
2484 		break;
2485 #endif
2486         case TARGET_SO_PASSCRED:
2487 		optname = SO_PASSCRED;
2488 		break;
2489         case TARGET_SO_PASSSEC:
2490                 optname = SO_PASSSEC;
2491                 break;
2492         case TARGET_SO_TIMESTAMP:
2493 		optname = SO_TIMESTAMP;
2494 		break;
2495         case TARGET_SO_RCVLOWAT:
2496 		optname = SO_RCVLOWAT;
2497 		break;
2498         default:
2499             goto unimplemented;
2500         }
2501 	if (optlen < sizeof(uint32_t))
2502             return -TARGET_EINVAL;
2503 
2504 	if (get_user_u32(val, optval_addr))
2505             return -TARGET_EFAULT;
2506 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2507         break;
2508 #ifdef SOL_NETLINK
2509     case SOL_NETLINK:
2510         switch (optname) {
2511         case NETLINK_PKTINFO:
2512         case NETLINK_ADD_MEMBERSHIP:
2513         case NETLINK_DROP_MEMBERSHIP:
2514         case NETLINK_BROADCAST_ERROR:
2515         case NETLINK_NO_ENOBUFS:
2516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2517         case NETLINK_LISTEN_ALL_NSID:
2518         case NETLINK_CAP_ACK:
2519 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2520 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2521         case NETLINK_EXT_ACK:
2522 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2523 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2524         case NETLINK_GET_STRICT_CHK:
2525 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2526             break;
2527         default:
2528             goto unimplemented;
2529         }
2530         val = 0;
2531         if (optlen < sizeof(uint32_t)) {
2532             return -TARGET_EINVAL;
2533         }
2534         if (get_user_u32(val, optval_addr)) {
2535             return -TARGET_EFAULT;
2536         }
2537         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2538                                    sizeof(val)));
2539         break;
2540 #endif /* SOL_NETLINK */
2541     default:
2542     unimplemented:
2543         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2544                       level, optname);
2545         ret = -TARGET_ENOPROTOOPT;
2546     }
2547     return ret;
2548 }
2549 
2550 /* do_getsockopt() Must return target values and target errnos. */
2551 static abi_long do_getsockopt(int sockfd, int level, int optname,
2552                               abi_ulong optval_addr, abi_ulong optlen)
2553 {
2554     abi_long ret;
2555     int len, val;
2556     socklen_t lv;
2557 
2558     switch(level) {
2559     case TARGET_SOL_SOCKET:
2560         level = SOL_SOCKET;
2561         switch (optname) {
2562         /* These don't just return a single integer */
2563         case TARGET_SO_PEERNAME:
2564             goto unimplemented;
2565         case TARGET_SO_RCVTIMEO: {
2566             struct timeval tv;
2567             socklen_t tvlen;
2568 
2569             optname = SO_RCVTIMEO;
2570 
2571 get_timeout:
2572             if (get_user_u32(len, optlen)) {
2573                 return -TARGET_EFAULT;
2574             }
2575             if (len < 0) {
2576                 return -TARGET_EINVAL;
2577             }
2578 
2579             tvlen = sizeof(tv);
2580             ret = get_errno(getsockopt(sockfd, level, optname,
2581                                        &tv, &tvlen));
2582             if (ret < 0) {
2583                 return ret;
2584             }
2585             if (len > sizeof(struct target_timeval)) {
2586                 len = sizeof(struct target_timeval);
2587             }
2588             if (copy_to_user_timeval(optval_addr, &tv)) {
2589                 return -TARGET_EFAULT;
2590             }
2591             if (put_user_u32(len, optlen)) {
2592                 return -TARGET_EFAULT;
2593             }
2594             break;
2595         }
2596         case TARGET_SO_SNDTIMEO:
2597             optname = SO_SNDTIMEO;
2598             goto get_timeout;
2599         case TARGET_SO_PEERCRED: {
2600             struct ucred cr;
2601             socklen_t crlen;
2602             struct target_ucred *tcr;
2603 
2604             if (get_user_u32(len, optlen)) {
2605                 return -TARGET_EFAULT;
2606             }
2607             if (len < 0) {
2608                 return -TARGET_EINVAL;
2609             }
2610 
2611             crlen = sizeof(cr);
2612             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2613                                        &cr, &crlen));
2614             if (ret < 0) {
2615                 return ret;
2616             }
2617             if (len > crlen) {
2618                 len = crlen;
2619             }
2620             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2621                 return -TARGET_EFAULT;
2622             }
2623             __put_user(cr.pid, &tcr->pid);
2624             __put_user(cr.uid, &tcr->uid);
2625             __put_user(cr.gid, &tcr->gid);
2626             unlock_user_struct(tcr, optval_addr, 1);
2627             if (put_user_u32(len, optlen)) {
2628                 return -TARGET_EFAULT;
2629             }
2630             break;
2631         }
2632         case TARGET_SO_PEERSEC: {
2633             char *name;
2634 
2635             if (get_user_u32(len, optlen)) {
2636                 return -TARGET_EFAULT;
2637             }
2638             if (len < 0) {
2639                 return -TARGET_EINVAL;
2640             }
2641             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2642             if (!name) {
2643                 return -TARGET_EFAULT;
2644             }
2645             lv = len;
2646             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2647                                        name, &lv));
2648             if (put_user_u32(lv, optlen)) {
2649                 ret = -TARGET_EFAULT;
2650             }
2651             unlock_user(name, optval_addr, lv);
2652             break;
2653         }
2654         case TARGET_SO_LINGER:
2655         {
2656             struct linger lg;
2657             socklen_t lglen;
2658             struct target_linger *tlg;
2659 
2660             if (get_user_u32(len, optlen)) {
2661                 return -TARGET_EFAULT;
2662             }
2663             if (len < 0) {
2664                 return -TARGET_EINVAL;
2665             }
2666 
2667             lglen = sizeof(lg);
2668             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2669                                        &lg, &lglen));
2670             if (ret < 0) {
2671                 return ret;
2672             }
2673             if (len > lglen) {
2674                 len = lglen;
2675             }
2676             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2677                 return -TARGET_EFAULT;
2678             }
2679             __put_user(lg.l_onoff, &tlg->l_onoff);
2680             __put_user(lg.l_linger, &tlg->l_linger);
2681             unlock_user_struct(tlg, optval_addr, 1);
2682             if (put_user_u32(len, optlen)) {
2683                 return -TARGET_EFAULT;
2684             }
2685             break;
2686         }
2687         /* Options with 'int' argument.  */
2688         case TARGET_SO_DEBUG:
2689             optname = SO_DEBUG;
2690             goto int_case;
2691         case TARGET_SO_REUSEADDR:
2692             optname = SO_REUSEADDR;
2693             goto int_case;
2694 #ifdef SO_REUSEPORT
2695         case TARGET_SO_REUSEPORT:
2696             optname = SO_REUSEPORT;
2697             goto int_case;
2698 #endif
2699         case TARGET_SO_TYPE:
2700             optname = SO_TYPE;
2701             goto int_case;
2702         case TARGET_SO_ERROR:
2703             optname = SO_ERROR;
2704             goto int_case;
2705         case TARGET_SO_DONTROUTE:
2706             optname = SO_DONTROUTE;
2707             goto int_case;
2708         case TARGET_SO_BROADCAST:
2709             optname = SO_BROADCAST;
2710             goto int_case;
2711         case TARGET_SO_SNDBUF:
2712             optname = SO_SNDBUF;
2713             goto int_case;
2714         case TARGET_SO_RCVBUF:
2715             optname = SO_RCVBUF;
2716             goto int_case;
2717         case TARGET_SO_KEEPALIVE:
2718             optname = SO_KEEPALIVE;
2719             goto int_case;
2720         case TARGET_SO_OOBINLINE:
2721             optname = SO_OOBINLINE;
2722             goto int_case;
2723         case TARGET_SO_NO_CHECK:
2724             optname = SO_NO_CHECK;
2725             goto int_case;
2726         case TARGET_SO_PRIORITY:
2727             optname = SO_PRIORITY;
2728             goto int_case;
2729 #ifdef SO_BSDCOMPAT
2730         case TARGET_SO_BSDCOMPAT:
2731             optname = SO_BSDCOMPAT;
2732             goto int_case;
2733 #endif
2734         case TARGET_SO_PASSCRED:
2735             optname = SO_PASSCRED;
2736             goto int_case;
2737         case TARGET_SO_TIMESTAMP:
2738             optname = SO_TIMESTAMP;
2739             goto int_case;
2740         case TARGET_SO_RCVLOWAT:
2741             optname = SO_RCVLOWAT;
2742             goto int_case;
2743         case TARGET_SO_ACCEPTCONN:
2744             optname = SO_ACCEPTCONN;
2745             goto int_case;
2746         case TARGET_SO_PROTOCOL:
2747             optname = SO_PROTOCOL;
2748             goto int_case;
2749         case TARGET_SO_DOMAIN:
2750             optname = SO_DOMAIN;
2751             goto int_case;
2752         default:
2753             goto int_case;
2754         }
2755         break;
2756     case SOL_TCP:
2757     case SOL_UDP:
2758         /* TCP and UDP options all take an 'int' value.  */
2759     int_case:
2760         if (get_user_u32(len, optlen))
2761             return -TARGET_EFAULT;
2762         if (len < 0)
2763             return -TARGET_EINVAL;
2764         lv = sizeof(lv);
2765         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2766         if (ret < 0)
2767             return ret;
2768         switch (optname) {
2769         case SO_TYPE:
2770             val = host_to_target_sock_type(val);
2771             break;
2772         case SO_ERROR:
2773             val = host_to_target_errno(val);
2774             break;
2775         }
2776         if (len > lv)
2777             len = lv;
2778         if (len == 4) {
2779             if (put_user_u32(val, optval_addr))
2780                 return -TARGET_EFAULT;
2781         } else {
2782             if (put_user_u8(val, optval_addr))
2783                 return -TARGET_EFAULT;
2784         }
2785         if (put_user_u32(len, optlen))
2786             return -TARGET_EFAULT;
2787         break;
2788     case SOL_IP:
2789         switch(optname) {
2790         case IP_TOS:
2791         case IP_TTL:
2792         case IP_HDRINCL:
2793         case IP_ROUTER_ALERT:
2794         case IP_RECVOPTS:
2795         case IP_RETOPTS:
2796         case IP_PKTINFO:
2797         case IP_MTU_DISCOVER:
2798         case IP_RECVERR:
2799         case IP_RECVTOS:
2800 #ifdef IP_FREEBIND
2801         case IP_FREEBIND:
2802 #endif
2803         case IP_MULTICAST_TTL:
2804         case IP_MULTICAST_LOOP:
2805             if (get_user_u32(len, optlen))
2806                 return -TARGET_EFAULT;
2807             if (len < 0)
2808                 return -TARGET_EINVAL;
2809             lv = sizeof(lv);
2810             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2811             if (ret < 0)
2812                 return ret;
2813             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2814                 len = 1;
2815                 if (put_user_u32(len, optlen)
2816                     || put_user_u8(val, optval_addr))
2817                     return -TARGET_EFAULT;
2818             } else {
2819                 if (len > sizeof(int))
2820                     len = sizeof(int);
2821                 if (put_user_u32(len, optlen)
2822                     || put_user_u32(val, optval_addr))
2823                     return -TARGET_EFAULT;
2824             }
2825             break;
2826         default:
2827             ret = -TARGET_ENOPROTOOPT;
2828             break;
2829         }
2830         break;
2831     case SOL_IPV6:
2832         switch (optname) {
2833         case IPV6_MTU_DISCOVER:
2834         case IPV6_MTU:
2835         case IPV6_V6ONLY:
2836         case IPV6_RECVPKTINFO:
2837         case IPV6_UNICAST_HOPS:
2838         case IPV6_MULTICAST_HOPS:
2839         case IPV6_MULTICAST_LOOP:
2840         case IPV6_RECVERR:
2841         case IPV6_RECVHOPLIMIT:
2842         case IPV6_2292HOPLIMIT:
2843         case IPV6_CHECKSUM:
2844         case IPV6_ADDRFORM:
2845         case IPV6_2292PKTINFO:
2846         case IPV6_RECVTCLASS:
2847         case IPV6_RECVRTHDR:
2848         case IPV6_2292RTHDR:
2849         case IPV6_RECVHOPOPTS:
2850         case IPV6_2292HOPOPTS:
2851         case IPV6_RECVDSTOPTS:
2852         case IPV6_2292DSTOPTS:
2853         case IPV6_TCLASS:
2854         case IPV6_ADDR_PREFERENCES:
2855 #ifdef IPV6_RECVPATHMTU
2856         case IPV6_RECVPATHMTU:
2857 #endif
2858 #ifdef IPV6_TRANSPARENT
2859         case IPV6_TRANSPARENT:
2860 #endif
2861 #ifdef IPV6_FREEBIND
2862         case IPV6_FREEBIND:
2863 #endif
2864 #ifdef IPV6_RECVORIGDSTADDR
2865         case IPV6_RECVORIGDSTADDR:
2866 #endif
2867             if (get_user_u32(len, optlen))
2868                 return -TARGET_EFAULT;
2869             if (len < 0)
2870                 return -TARGET_EINVAL;
2871             lv = sizeof(lv);
2872             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2873             if (ret < 0)
2874                 return ret;
2875             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2876                 len = 1;
2877                 if (put_user_u32(len, optlen)
2878                     || put_user_u8(val, optval_addr))
2879                     return -TARGET_EFAULT;
2880             } else {
2881                 if (len > sizeof(int))
2882                     len = sizeof(int);
2883                 if (put_user_u32(len, optlen)
2884                     || put_user_u32(val, optval_addr))
2885                     return -TARGET_EFAULT;
2886             }
2887             break;
2888         default:
2889             ret = -TARGET_ENOPROTOOPT;
2890             break;
2891         }
2892         break;
2893 #ifdef SOL_NETLINK
2894     case SOL_NETLINK:
2895         switch (optname) {
2896         case NETLINK_PKTINFO:
2897         case NETLINK_BROADCAST_ERROR:
2898         case NETLINK_NO_ENOBUFS:
2899 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2900         case NETLINK_LISTEN_ALL_NSID:
2901         case NETLINK_CAP_ACK:
2902 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2904         case NETLINK_EXT_ACK:
2905 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2906 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2907         case NETLINK_GET_STRICT_CHK:
2908 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2909             if (get_user_u32(len, optlen)) {
2910                 return -TARGET_EFAULT;
2911             }
2912             if (len != sizeof(val)) {
2913                 return -TARGET_EINVAL;
2914             }
2915             lv = len;
2916             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2917             if (ret < 0) {
2918                 return ret;
2919             }
2920             if (put_user_u32(lv, optlen)
2921                 || put_user_u32(val, optval_addr)) {
2922                 return -TARGET_EFAULT;
2923             }
2924             break;
2925 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2926         case NETLINK_LIST_MEMBERSHIPS:
2927         {
2928             uint32_t *results;
2929             int i;
2930             if (get_user_u32(len, optlen)) {
2931                 return -TARGET_EFAULT;
2932             }
2933             if (len < 0) {
2934                 return -TARGET_EINVAL;
2935             }
2936             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2937             if (!results && len > 0) {
2938                 return -TARGET_EFAULT;
2939             }
2940             lv = len;
2941             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2942             if (ret < 0) {
2943                 unlock_user(results, optval_addr, 0);
2944                 return ret;
2945             }
2946             /* swap host endianness to target endianness. */
2947             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2948                 results[i] = tswap32(results[i]);
2949             }
2950             if (put_user_u32(lv, optlen)) {
2951                 return -TARGET_EFAULT;
2952             }
2953             unlock_user(results, optval_addr, 0);
2954             break;
2955         }
2956 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2957         default:
2958             goto unimplemented;
2959         }
2960         break;
2961 #endif /* SOL_NETLINK */
2962     default:
2963     unimplemented:
2964         qemu_log_mask(LOG_UNIMP,
2965                       "getsockopt level=%d optname=%d not yet supported\n",
2966                       level, optname);
2967         ret = -TARGET_EOPNOTSUPP;
2968         break;
2969     }
2970     return ret;
2971 }
2972 
2973 /* Convert target low/high pair representing file offset into the host
2974  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2975  * as the kernel doesn't handle them either.
2976  */
2977 static void target_to_host_low_high(abi_ulong tlow,
2978                                     abi_ulong thigh,
2979                                     unsigned long *hlow,
2980                                     unsigned long *hhigh)
2981 {
2982     uint64_t off = tlow |
2983         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2984         TARGET_LONG_BITS / 2;
2985 
2986     *hlow = off;
2987     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2988 }
2989 
2990 static struct iovec *lock_iovec(int type, abi_ulong target_addr,
2991                                 abi_ulong count, int copy)
2992 {
2993     struct target_iovec *target_vec;
2994     struct iovec *vec;
2995     abi_ulong total_len, max_len;
2996     int i;
2997     int err = 0;
2998     bool bad_address = false;
2999 
3000     if (count == 0) {
3001         errno = 0;
3002         return NULL;
3003     }
3004     if (count > IOV_MAX) {
3005         errno = EINVAL;
3006         return NULL;
3007     }
3008 
3009     vec = g_try_new0(struct iovec, count);
3010     if (vec == NULL) {
3011         errno = ENOMEM;
3012         return NULL;
3013     }
3014 
3015     target_vec = lock_user(VERIFY_READ, target_addr,
3016                            count * sizeof(struct target_iovec), 1);
3017     if (target_vec == NULL) {
3018         err = EFAULT;
3019         goto fail2;
3020     }
3021 
3022     /* ??? If host page size > target page size, this will result in a
3023        value larger than what we can actually support.  */
3024     max_len = 0x7fffffff & TARGET_PAGE_MASK;
3025     total_len = 0;
3026 
3027     for (i = 0; i < count; i++) {
3028         abi_ulong base = tswapal(target_vec[i].iov_base);
3029         abi_long len = tswapal(target_vec[i].iov_len);
3030 
3031         if (len < 0) {
3032             err = EINVAL;
3033             goto fail;
3034         } else if (len == 0) {
3035             /* Zero length pointer is ignored.  */
3036             vec[i].iov_base = 0;
3037         } else {
3038             vec[i].iov_base = lock_user(type, base, len, copy);
3039             /* If the first buffer pointer is bad, this is a fault.  But
3040              * subsequent bad buffers will result in a partial write; this
3041              * is realized by filling the vector with null pointers and
3042              * zero lengths. */
3043             if (!vec[i].iov_base) {
3044                 if (i == 0) {
3045                     err = EFAULT;
3046                     goto fail;
3047                 } else {
3048                     bad_address = true;
3049                 }
3050             }
3051             if (bad_address) {
3052                 len = 0;
3053             }
3054             if (len > max_len - total_len) {
3055                 len = max_len - total_len;
3056             }
3057         }
3058         vec[i].iov_len = len;
3059         total_len += len;
3060     }
3061 
3062     unlock_user(target_vec, target_addr, 0);
3063     return vec;
3064 
3065  fail:
3066     while (--i >= 0) {
3067         if (tswapal(target_vec[i].iov_len) > 0) {
3068             unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
3069         }
3070     }
3071     unlock_user(target_vec, target_addr, 0);
3072  fail2:
3073     g_free(vec);
3074     errno = err;
3075     return NULL;
3076 }
3077 
3078 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3079                          abi_ulong count, int copy)
3080 {
3081     struct target_iovec *target_vec;
3082     int i;
3083 
3084     target_vec = lock_user(VERIFY_READ, target_addr,
3085                            count * sizeof(struct target_iovec), 1);
3086     if (target_vec) {
3087         for (i = 0; i < count; i++) {
3088             abi_ulong base = tswapal(target_vec[i].iov_base);
3089             abi_long len = tswapal(target_vec[i].iov_len);
3090             if (len < 0) {
3091                 break;
3092             }
3093             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3094         }
3095         unlock_user(target_vec, target_addr, 0);
3096     }
3097 
3098     g_free(vec);
3099 }
3100 
3101 static inline int target_to_host_sock_type(int *type)
3102 {
3103     int host_type = 0;
3104     int target_type = *type;
3105 
3106     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3107     case TARGET_SOCK_DGRAM:
3108         host_type = SOCK_DGRAM;
3109         break;
3110     case TARGET_SOCK_STREAM:
3111         host_type = SOCK_STREAM;
3112         break;
3113     default:
3114         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3115         break;
3116     }
3117     if (target_type & TARGET_SOCK_CLOEXEC) {
3118 #if defined(SOCK_CLOEXEC)
3119         host_type |= SOCK_CLOEXEC;
3120 #else
3121         return -TARGET_EINVAL;
3122 #endif
3123     }
3124     if (target_type & TARGET_SOCK_NONBLOCK) {
3125 #if defined(SOCK_NONBLOCK)
3126         host_type |= SOCK_NONBLOCK;
3127 #elif !defined(O_NONBLOCK)
3128         return -TARGET_EINVAL;
3129 #endif
3130     }
3131     *type = host_type;
3132     return 0;
3133 }
3134 
3135 /* Try to emulate socket type flags after socket creation.  */
3136 static int sock_flags_fixup(int fd, int target_type)
3137 {
3138 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3139     if (target_type & TARGET_SOCK_NONBLOCK) {
3140         int flags = fcntl(fd, F_GETFL);
3141         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3142             close(fd);
3143             return -TARGET_EINVAL;
3144         }
3145     }
3146 #endif
3147     return fd;
3148 }
3149 
3150 /* do_socket() Must return target values and target errnos. */
3151 static abi_long do_socket(int domain, int type, int protocol)
3152 {
3153     int target_type = type;
3154     int ret;
3155 
3156     ret = target_to_host_sock_type(&type);
3157     if (ret) {
3158         return ret;
3159     }
3160 
3161     if (domain == PF_NETLINK && !(
3162 #ifdef CONFIG_RTNETLINK
3163          protocol == NETLINK_ROUTE ||
3164 #endif
3165          protocol == NETLINK_KOBJECT_UEVENT ||
3166          protocol == NETLINK_AUDIT)) {
3167         return -TARGET_EPROTONOSUPPORT;
3168     }
3169 
3170     if (domain == AF_PACKET ||
3171         (domain == AF_INET && type == SOCK_PACKET)) {
3172         protocol = tswap16(protocol);
3173     }
3174 
3175     ret = get_errno(socket(domain, type, protocol));
3176     if (ret >= 0) {
3177         ret = sock_flags_fixup(ret, target_type);
3178         if (type == SOCK_PACKET) {
3179             /* Manage an obsolete case :
3180              * if socket type is SOCK_PACKET, bind by name
3181              */
3182             fd_trans_register(ret, &target_packet_trans);
3183         } else if (domain == PF_NETLINK) {
3184             switch (protocol) {
3185 #ifdef CONFIG_RTNETLINK
3186             case NETLINK_ROUTE:
3187                 fd_trans_register(ret, &target_netlink_route_trans);
3188                 break;
3189 #endif
3190             case NETLINK_KOBJECT_UEVENT:
3191                 /* nothing to do: messages are strings */
3192                 break;
3193             case NETLINK_AUDIT:
3194                 fd_trans_register(ret, &target_netlink_audit_trans);
3195                 break;
3196             default:
3197                 g_assert_not_reached();
3198             }
3199         }
3200     }
3201     return ret;
3202 }
3203 
3204 /* do_bind() Must return target values and target errnos. */
3205 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3206                         socklen_t addrlen)
3207 {
3208     void *addr;
3209     abi_long ret;
3210 
3211     if ((int)addrlen < 0) {
3212         return -TARGET_EINVAL;
3213     }
3214 
3215     addr = alloca(addrlen+1);
3216 
3217     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3218     if (ret)
3219         return ret;
3220 
3221     return get_errno(bind(sockfd, addr, addrlen));
3222 }
3223 
3224 /* do_connect() Must return target values and target errnos. */
3225 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3226                            socklen_t addrlen)
3227 {
3228     void *addr;
3229     abi_long ret;
3230 
3231     if ((int)addrlen < 0) {
3232         return -TARGET_EINVAL;
3233     }
3234 
3235     addr = alloca(addrlen+1);
3236 
3237     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3238     if (ret)
3239         return ret;
3240 
3241     return get_errno(safe_connect(sockfd, addr, addrlen));
3242 }
3243 
3244 /* do_sendrecvmsg_locked() Must return target values and target errnos. */
3245 static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
3246                                       int flags, int send)
3247 {
3248     abi_long ret, len;
3249     struct msghdr msg;
3250     abi_ulong count;
3251     struct iovec *vec;
3252     abi_ulong target_vec;
3253 
3254     if (msgp->msg_name) {
3255         msg.msg_namelen = tswap32(msgp->msg_namelen);
3256         msg.msg_name = alloca(msg.msg_namelen+1);
3257         ret = target_to_host_sockaddr(fd, msg.msg_name,
3258                                       tswapal(msgp->msg_name),
3259                                       msg.msg_namelen);
3260         if (ret == -TARGET_EFAULT) {
3261             /* For connected sockets msg_name and msg_namelen must
3262              * be ignored, so returning EFAULT immediately is wrong.
3263              * Instead, pass a bad msg_name to the host kernel, and
3264              * let it decide whether to return EFAULT or not.
3265              */
3266             msg.msg_name = (void *)-1;
3267         } else if (ret) {
3268             goto out2;
3269         }
3270     } else {
3271         msg.msg_name = NULL;
3272         msg.msg_namelen = 0;
3273     }
3274     msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
3275     msg.msg_control = alloca(msg.msg_controllen);
3276     memset(msg.msg_control, 0, msg.msg_controllen);
3277 
3278     msg.msg_flags = tswap32(msgp->msg_flags);
3279 
3280     count = tswapal(msgp->msg_iovlen);
3281     target_vec = tswapal(msgp->msg_iov);
3282 
3283     if (count > IOV_MAX) {
3284         /* sendrcvmsg returns a different errno for this condition than
3285          * readv/writev, so we must catch it here before lock_iovec() does.
3286          */
3287         ret = -TARGET_EMSGSIZE;
3288         goto out2;
3289     }
3290 
3291     vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
3292                      target_vec, count, send);
3293     if (vec == NULL) {
3294         ret = -host_to_target_errno(errno);
3295         /* allow sending packet without any iov, e.g. with MSG_MORE flag */
3296         if (!send || ret) {
3297             goto out2;
3298         }
3299     }
3300     msg.msg_iovlen = count;
3301     msg.msg_iov = vec;
3302 
3303     if (send) {
3304         if (fd_trans_target_to_host_data(fd)) {
3305             void *host_msg;
3306 
3307             host_msg = g_malloc(msg.msg_iov->iov_len);
3308             memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
3309             ret = fd_trans_target_to_host_data(fd)(host_msg,
3310                                                    msg.msg_iov->iov_len);
3311             if (ret >= 0) {
3312                 msg.msg_iov->iov_base = host_msg;
3313                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3314             }
3315             g_free(host_msg);
3316         } else {
3317             ret = target_to_host_cmsg(&msg, msgp);
3318             if (ret == 0) {
3319                 ret = get_errno(safe_sendmsg(fd, &msg, flags));
3320             }
3321         }
3322     } else {
3323         ret = get_errno(safe_recvmsg(fd, &msg, flags));
3324         if (!is_error(ret)) {
3325             len = ret;
3326             if (fd_trans_host_to_target_data(fd)) {
3327                 ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
3328                                                MIN(msg.msg_iov->iov_len, len));
3329             }
3330             if (!is_error(ret)) {
3331                 ret = host_to_target_cmsg(msgp, &msg);
3332             }
3333             if (!is_error(ret)) {
3334                 msgp->msg_namelen = tswap32(msg.msg_namelen);
3335                 msgp->msg_flags = tswap32(msg.msg_flags);
3336                 if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
3337                     ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
3338                                     msg.msg_name, msg.msg_namelen);
3339                     if (ret) {
3340                         goto out;
3341                     }
3342                 }
3343 
3344                 ret = len;
3345             }
3346         }
3347     }
3348 
3349 out:
3350     if (vec) {
3351         unlock_iovec(vec, target_vec, count, !send);
3352     }
3353 out2:
3354     return ret;
3355 }
3356 
3357 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3358                                int flags, int send)
3359 {
3360     abi_long ret;
3361     struct target_msghdr *msgp;
3362 
3363     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3364                           msgp,
3365                           target_msg,
3366                           send ? 1 : 0)) {
3367         return -TARGET_EFAULT;
3368     }
3369     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3370     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3371     return ret;
3372 }
3373 
3374 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3375  * so it might not have this *mmsg-specific flag either.
3376  */
3377 #ifndef MSG_WAITFORONE
3378 #define MSG_WAITFORONE 0x10000
3379 #endif
3380 
3381 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3382                                 unsigned int vlen, unsigned int flags,
3383                                 int send)
3384 {
3385     struct target_mmsghdr *mmsgp;
3386     abi_long ret = 0;
3387     int i;
3388 
3389     if (vlen > UIO_MAXIOV) {
3390         vlen = UIO_MAXIOV;
3391     }
3392 
3393     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3394     if (!mmsgp) {
3395         return -TARGET_EFAULT;
3396     }
3397 
3398     for (i = 0; i < vlen; i++) {
3399         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3400         if (is_error(ret)) {
3401             break;
3402         }
3403         mmsgp[i].msg_len = tswap32(ret);
3404         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3405         if (flags & MSG_WAITFORONE) {
3406             flags |= MSG_DONTWAIT;
3407         }
3408     }
3409 
3410     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3411 
3412     /* Return number of datagrams sent if we sent any at all;
3413      * otherwise return the error.
3414      */
3415     if (i) {
3416         return i;
3417     }
3418     return ret;
3419 }
3420 
3421 /* do_accept4() Must return target values and target errnos. */
3422 static abi_long do_accept4(int fd, abi_ulong target_addr,
3423                            abi_ulong target_addrlen_addr, int flags)
3424 {
3425     socklen_t addrlen, ret_addrlen;
3426     void *addr;
3427     abi_long ret;
3428     int host_flags;
3429 
3430     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3431         return -TARGET_EINVAL;
3432     }
3433 
3434     host_flags = 0;
3435     if (flags & TARGET_SOCK_NONBLOCK) {
3436         host_flags |= SOCK_NONBLOCK;
3437     }
3438     if (flags & TARGET_SOCK_CLOEXEC) {
3439         host_flags |= SOCK_CLOEXEC;
3440     }
3441 
3442     if (target_addr == 0) {
3443         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3444     }
3445 
3446     /* linux returns EFAULT if addrlen pointer is invalid */
3447     if (get_user_u32(addrlen, target_addrlen_addr))
3448         return -TARGET_EFAULT;
3449 
3450     if ((int)addrlen < 0) {
3451         return -TARGET_EINVAL;
3452     }
3453 
3454     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3455         return -TARGET_EFAULT;
3456     }
3457 
3458     addr = alloca(addrlen);
3459 
3460     ret_addrlen = addrlen;
3461     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3462     if (!is_error(ret)) {
3463         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3464         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3465             ret = -TARGET_EFAULT;
3466         }
3467     }
3468     return ret;
3469 }
3470 
3471 /* do_getpeername() Must return target values and target errnos. */
3472 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3473                                abi_ulong target_addrlen_addr)
3474 {
3475     socklen_t addrlen, ret_addrlen;
3476     void *addr;
3477     abi_long ret;
3478 
3479     if (get_user_u32(addrlen, target_addrlen_addr))
3480         return -TARGET_EFAULT;
3481 
3482     if ((int)addrlen < 0) {
3483         return -TARGET_EINVAL;
3484     }
3485 
3486     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3487         return -TARGET_EFAULT;
3488     }
3489 
3490     addr = alloca(addrlen);
3491 
3492     ret_addrlen = addrlen;
3493     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3494     if (!is_error(ret)) {
3495         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3496         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3497             ret = -TARGET_EFAULT;
3498         }
3499     }
3500     return ret;
3501 }
3502 
3503 /* do_getsockname() Must return target values and target errnos. */
3504 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3505                                abi_ulong target_addrlen_addr)
3506 {
3507     socklen_t addrlen, ret_addrlen;
3508     void *addr;
3509     abi_long ret;
3510 
3511     if (get_user_u32(addrlen, target_addrlen_addr))
3512         return -TARGET_EFAULT;
3513 
3514     if ((int)addrlen < 0) {
3515         return -TARGET_EINVAL;
3516     }
3517 
3518     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3519         return -TARGET_EFAULT;
3520     }
3521 
3522     addr = alloca(addrlen);
3523 
3524     ret_addrlen = addrlen;
3525     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3526     if (!is_error(ret)) {
3527         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3528         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3529             ret = -TARGET_EFAULT;
3530         }
3531     }
3532     return ret;
3533 }
3534 
3535 /* do_socketpair() Must return target values and target errnos. */
3536 static abi_long do_socketpair(int domain, int type, int protocol,
3537                               abi_ulong target_tab_addr)
3538 {
3539     int tab[2];
3540     abi_long ret;
3541 
3542     target_to_host_sock_type(&type);
3543 
3544     ret = get_errno(socketpair(domain, type, protocol, tab));
3545     if (!is_error(ret)) {
3546         if (put_user_s32(tab[0], target_tab_addr)
3547             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3548             ret = -TARGET_EFAULT;
3549     }
3550     return ret;
3551 }
3552 
3553 /* do_sendto() Must return target values and target errnos. */
3554 static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
3555                           abi_ulong target_addr, socklen_t addrlen)
3556 {
3557     void *addr;
3558     void *host_msg;
3559     void *copy_msg = NULL;
3560     abi_long ret;
3561 
3562     if ((int)addrlen < 0) {
3563         return -TARGET_EINVAL;
3564     }
3565 
3566     host_msg = lock_user(VERIFY_READ, msg, len, 1);
3567     if (!host_msg)
3568         return -TARGET_EFAULT;
3569     if (fd_trans_target_to_host_data(fd)) {
3570         copy_msg = host_msg;
3571         host_msg = g_malloc(len);
3572         memcpy(host_msg, copy_msg, len);
3573         ret = fd_trans_target_to_host_data(fd)(host_msg, len);
3574         if (ret < 0) {
3575             goto fail;
3576         }
3577     }
3578     if (target_addr) {
3579         addr = alloca(addrlen+1);
3580         ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
3581         if (ret) {
3582             goto fail;
3583         }
3584         ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
3585     } else {
3586         ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
3587     }
3588 fail:
3589     if (copy_msg) {
3590         g_free(host_msg);
3591         host_msg = copy_msg;
3592     }
3593     unlock_user(host_msg, msg, 0);
3594     return ret;
3595 }
3596 
3597 /* do_recvfrom() Must return target values and target errnos. */
3598 static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
3599                             abi_ulong target_addr,
3600                             abi_ulong target_addrlen)
3601 {
3602     socklen_t addrlen, ret_addrlen;
3603     void *addr;
3604     void *host_msg;
3605     abi_long ret;
3606 
3607     if (!msg) {
3608         host_msg = NULL;
3609     } else {
3610         host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
3611         if (!host_msg) {
3612             return -TARGET_EFAULT;
3613         }
3614     }
3615     if (target_addr) {
3616         if (get_user_u32(addrlen, target_addrlen)) {
3617             ret = -TARGET_EFAULT;
3618             goto fail;
3619         }
3620         if ((int)addrlen < 0) {
3621             ret = -TARGET_EINVAL;
3622             goto fail;
3623         }
3624         addr = alloca(addrlen);
3625         ret_addrlen = addrlen;
3626         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
3627                                       addr, &ret_addrlen));
3628     } else {
3629         addr = NULL; /* To keep compiler quiet.  */
3630         addrlen = 0; /* To keep compiler quiet.  */
3631         ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
3632     }
3633     if (!is_error(ret)) {
3634         if (fd_trans_host_to_target_data(fd)) {
3635             abi_long trans;
3636             trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
3637             if (is_error(trans)) {
3638                 ret = trans;
3639                 goto fail;
3640             }
3641         }
3642         if (target_addr) {
3643             host_to_target_sockaddr(target_addr, addr,
3644                                     MIN(addrlen, ret_addrlen));
3645             if (put_user_u32(ret_addrlen, target_addrlen)) {
3646                 ret = -TARGET_EFAULT;
3647                 goto fail;
3648             }
3649         }
3650         unlock_user(host_msg, msg, len);
3651     } else {
3652 fail:
3653         unlock_user(host_msg, msg, 0);
3654     }
3655     return ret;
3656 }
3657 
3658 #ifdef TARGET_NR_socketcall
3659 /* do_socketcall() must return target values and target errnos. */
3660 static abi_long do_socketcall(int num, abi_ulong vptr)
3661 {
3662     static const unsigned nargs[] = { /* number of arguments per operation */
3663         [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
3664         [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
3665         [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
3666         [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
3667         [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
3668         [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
3669         [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
3670         [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
3671         [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
3672         [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
3673         [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
3674         [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
3675         [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
3676         [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3677         [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
3678         [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
3679         [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
3680         [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
3681         [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
3682         [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
3683     };
3684     abi_long a[6]; /* max 6 args */
3685     unsigned i;
3686 
3687     /* check the range of the first argument num */
3688     /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
3689     if (num < 1 || num > TARGET_SYS_SENDMMSG) {
3690         return -TARGET_EINVAL;
3691     }
3692     /* ensure we have space for args */
3693     if (nargs[num] > ARRAY_SIZE(a)) {
3694         return -TARGET_EINVAL;
3695     }
3696     /* collect the arguments in a[] according to nargs[] */
3697     for (i = 0; i < nargs[num]; ++i) {
3698         if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
3699             return -TARGET_EFAULT;
3700         }
3701     }
3702     /* now when we have the args, invoke the appropriate underlying function */
3703     switch (num) {
3704     case TARGET_SYS_SOCKET: /* domain, type, protocol */
3705         return do_socket(a[0], a[1], a[2]);
3706     case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
3707         return do_bind(a[0], a[1], a[2]);
3708     case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
3709         return do_connect(a[0], a[1], a[2]);
3710     case TARGET_SYS_LISTEN: /* sockfd, backlog */
3711         return get_errno(listen(a[0], a[1]));
3712     case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
3713         return do_accept4(a[0], a[1], a[2], 0);
3714     case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
3715         return do_getsockname(a[0], a[1], a[2]);
3716     case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
3717         return do_getpeername(a[0], a[1], a[2]);
3718     case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
3719         return do_socketpair(a[0], a[1], a[2], a[3]);
3720     case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
3721         return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
3722     case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
3723         return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
3724     case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
3725         return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
3726     case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
3727         return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
3728     case TARGET_SYS_SHUTDOWN: /* sockfd, how */
3729         return get_errno(shutdown(a[0], a[1]));
3730     case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3731         return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
3732     case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
3733         return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
3734     case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
3735         return do_sendrecvmsg(a[0], a[1], a[2], 1);
3736     case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
3737         return do_sendrecvmsg(a[0], a[1], a[2], 0);
3738     case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
3739         return do_accept4(a[0], a[1], a[2], a[3]);
3740     case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
3741         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
3742     case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
3743         return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
3744     default:
3745         qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
3746         return -TARGET_EINVAL;
3747     }
3748 }
3749 #endif
3750 
3751 #ifndef TARGET_SEMID64_DS
3752 /* asm-generic version of this struct */
3753 struct target_semid64_ds
3754 {
3755   struct target_ipc_perm sem_perm;
3756   abi_ulong sem_otime;
3757 #if TARGET_ABI_BITS == 32
3758   abi_ulong __unused1;
3759 #endif
3760   abi_ulong sem_ctime;
3761 #if TARGET_ABI_BITS == 32
3762   abi_ulong __unused2;
3763 #endif
3764   abi_ulong sem_nsems;
3765   abi_ulong __unused3;
3766   abi_ulong __unused4;
3767 };
3768 #endif
3769 
3770 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3771                                                abi_ulong target_addr)
3772 {
3773     struct target_ipc_perm *target_ip;
3774     struct target_semid64_ds *target_sd;
3775 
3776     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3777         return -TARGET_EFAULT;
3778     target_ip = &(target_sd->sem_perm);
3779     host_ip->__key = tswap32(target_ip->__key);
3780     host_ip->uid = tswap32(target_ip->uid);
3781     host_ip->gid = tswap32(target_ip->gid);
3782     host_ip->cuid = tswap32(target_ip->cuid);
3783     host_ip->cgid = tswap32(target_ip->cgid);
3784 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3785     host_ip->mode = tswap32(target_ip->mode);
3786 #else
3787     host_ip->mode = tswap16(target_ip->mode);
3788 #endif
3789 #if defined(TARGET_PPC)
3790     host_ip->__seq = tswap32(target_ip->__seq);
3791 #else
3792     host_ip->__seq = tswap16(target_ip->__seq);
3793 #endif
3794     unlock_user_struct(target_sd, target_addr, 0);
3795     return 0;
3796 }
3797 
3798 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3799                                                struct ipc_perm *host_ip)
3800 {
3801     struct target_ipc_perm *target_ip;
3802     struct target_semid64_ds *target_sd;
3803 
3804     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3805         return -TARGET_EFAULT;
3806     target_ip = &(target_sd->sem_perm);
3807     target_ip->__key = tswap32(host_ip->__key);
3808     target_ip->uid = tswap32(host_ip->uid);
3809     target_ip->gid = tswap32(host_ip->gid);
3810     target_ip->cuid = tswap32(host_ip->cuid);
3811     target_ip->cgid = tswap32(host_ip->cgid);
3812 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3813     target_ip->mode = tswap32(host_ip->mode);
3814 #else
3815     target_ip->mode = tswap16(host_ip->mode);
3816 #endif
3817 #if defined(TARGET_PPC)
3818     target_ip->__seq = tswap32(host_ip->__seq);
3819 #else
3820     target_ip->__seq = tswap16(host_ip->__seq);
3821 #endif
3822     unlock_user_struct(target_sd, target_addr, 1);
3823     return 0;
3824 }
3825 
3826 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3827                                                abi_ulong target_addr)
3828 {
3829     struct target_semid64_ds *target_sd;
3830 
3831     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3832         return -TARGET_EFAULT;
3833     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3834         return -TARGET_EFAULT;
3835     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3836     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3837     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3838     unlock_user_struct(target_sd, target_addr, 0);
3839     return 0;
3840 }
3841 
3842 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3843                                                struct semid_ds *host_sd)
3844 {
3845     struct target_semid64_ds *target_sd;
3846 
3847     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3848         return -TARGET_EFAULT;
3849     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3850         return -TARGET_EFAULT;
3851     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3852     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3853     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3854     unlock_user_struct(target_sd, target_addr, 1);
3855     return 0;
3856 }
3857 
3858 struct target_seminfo {
3859     int semmap;
3860     int semmni;
3861     int semmns;
3862     int semmnu;
3863     int semmsl;
3864     int semopm;
3865     int semume;
3866     int semusz;
3867     int semvmx;
3868     int semaem;
3869 };
3870 
3871 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3872                                               struct seminfo *host_seminfo)
3873 {
3874     struct target_seminfo *target_seminfo;
3875     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3876         return -TARGET_EFAULT;
3877     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3878     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3879     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3880     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3881     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3882     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3883     __put_user(host_seminfo->semume, &target_seminfo->semume);
3884     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3885     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3886     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3887     unlock_user_struct(target_seminfo, target_addr, 1);
3888     return 0;
3889 }
3890 
3891 union semun {
3892 	int val;
3893 	struct semid_ds *buf;
3894 	unsigned short *array;
3895 	struct seminfo *__buf;
3896 };
3897 
3898 union target_semun {
3899 	int val;
3900 	abi_ulong buf;
3901 	abi_ulong array;
3902 	abi_ulong __buf;
3903 };
3904 
3905 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3906                                                abi_ulong target_addr)
3907 {
3908     int nsems;
3909     unsigned short *array;
3910     union semun semun;
3911     struct semid_ds semid_ds;
3912     int i, ret;
3913 
3914     semun.buf = &semid_ds;
3915 
3916     ret = semctl(semid, 0, IPC_STAT, semun);
3917     if (ret == -1)
3918         return get_errno(ret);
3919 
3920     nsems = semid_ds.sem_nsems;
3921 
3922     *host_array = g_try_new(unsigned short, nsems);
3923     if (!*host_array) {
3924         return -TARGET_ENOMEM;
3925     }
3926     array = lock_user(VERIFY_READ, target_addr,
3927                       nsems*sizeof(unsigned short), 1);
3928     if (!array) {
3929         g_free(*host_array);
3930         return -TARGET_EFAULT;
3931     }
3932 
3933     for(i=0; i<nsems; i++) {
3934         __get_user((*host_array)[i], &array[i]);
3935     }
3936     unlock_user(array, target_addr, 0);
3937 
3938     return 0;
3939 }
3940 
3941 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3942                                                unsigned short **host_array)
3943 {
3944     int nsems;
3945     unsigned short *array;
3946     union semun semun;
3947     struct semid_ds semid_ds;
3948     int i, ret;
3949 
3950     semun.buf = &semid_ds;
3951 
3952     ret = semctl(semid, 0, IPC_STAT, semun);
3953     if (ret == -1)
3954         return get_errno(ret);
3955 
3956     nsems = semid_ds.sem_nsems;
3957 
3958     array = lock_user(VERIFY_WRITE, target_addr,
3959                       nsems*sizeof(unsigned short), 0);
3960     if (!array)
3961         return -TARGET_EFAULT;
3962 
3963     for(i=0; i<nsems; i++) {
3964         __put_user((*host_array)[i], &array[i]);
3965     }
3966     g_free(*host_array);
3967     unlock_user(array, target_addr, 1);
3968 
3969     return 0;
3970 }
3971 
3972 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3973                                  abi_ulong target_arg)
3974 {
3975     union target_semun target_su = { .buf = target_arg };
3976     union semun arg;
3977     struct semid_ds dsarg;
3978     unsigned short *array = NULL;
3979     struct seminfo seminfo;
3980     abi_long ret = -TARGET_EINVAL;
3981     abi_long err;
3982     cmd &= 0xff;
3983 
3984     switch( cmd ) {
3985 	case GETVAL:
3986 	case SETVAL:
3987             /* In 64 bit cross-endian situations, we will erroneously pick up
3988              * the wrong half of the union for the "val" element.  To rectify
3989              * this, the entire 8-byte structure is byteswapped, followed by
3990 	     * a swap of the 4 byte val field. In other cases, the data is
3991 	     * already in proper host byte order. */
3992 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
3993 		target_su.buf = tswapal(target_su.buf);
3994 		arg.val = tswap32(target_su.val);
3995 	    } else {
3996 		arg.val = target_su.val;
3997 	    }
3998             ret = get_errno(semctl(semid, semnum, cmd, arg));
3999             break;
4000 	case GETALL:
4001 	case SETALL:
4002             err = target_to_host_semarray(semid, &array, target_su.array);
4003             if (err)
4004                 return err;
4005             arg.array = array;
4006             ret = get_errno(semctl(semid, semnum, cmd, arg));
4007             err = host_to_target_semarray(semid, target_su.array, &array);
4008             if (err)
4009                 return err;
4010             break;
4011 	case IPC_STAT:
4012 	case IPC_SET:
4013 	case SEM_STAT:
4014             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4015             if (err)
4016                 return err;
4017             arg.buf = &dsarg;
4018             ret = get_errno(semctl(semid, semnum, cmd, arg));
4019             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4020             if (err)
4021                 return err;
4022             break;
4023 	case IPC_INFO:
4024 	case SEM_INFO:
4025             arg.__buf = &seminfo;
4026             ret = get_errno(semctl(semid, semnum, cmd, arg));
4027             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4028             if (err)
4029                 return err;
4030             break;
4031 	case IPC_RMID:
4032 	case GETPID:
4033 	case GETNCNT:
4034 	case GETZCNT:
4035             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4036             break;
4037     }
4038 
4039     return ret;
4040 }
4041 
4042 struct target_sembuf {
4043     unsigned short sem_num;
4044     short sem_op;
4045     short sem_flg;
4046 };
4047 
4048 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4049                                              abi_ulong target_addr,
4050                                              unsigned nsops)
4051 {
4052     struct target_sembuf *target_sembuf;
4053     int i;
4054 
4055     target_sembuf = lock_user(VERIFY_READ, target_addr,
4056                               nsops*sizeof(struct target_sembuf), 1);
4057     if (!target_sembuf)
4058         return -TARGET_EFAULT;
4059 
4060     for(i=0; i<nsops; i++) {
4061         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4062         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4063         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4064     }
4065 
4066     unlock_user(target_sembuf, target_addr, 0);
4067 
4068     return 0;
4069 }
4070 
4071 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4072     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4073 
4074 /*
4075  * This macro is required to handle the s390 variants, which passes the
4076  * arguments in a different order than default.
4077  */
4078 #ifdef __s390x__
4079 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4080   (__nsops), (__timeout), (__sops)
4081 #else
4082 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4083   (__nsops), 0, (__sops), (__timeout)
4084 #endif
4085 
4086 static inline abi_long do_semtimedop(int semid,
4087                                      abi_long ptr,
4088                                      unsigned nsops,
4089                                      abi_long timeout, bool time64)
4090 {
4091     struct sembuf *sops;
4092     struct timespec ts, *pts = NULL;
4093     abi_long ret;
4094 
4095     if (timeout) {
4096         pts = &ts;
4097         if (time64) {
4098             if (target_to_host_timespec64(pts, timeout)) {
4099                 return -TARGET_EFAULT;
4100             }
4101         } else {
4102             if (target_to_host_timespec(pts, timeout)) {
4103                 return -TARGET_EFAULT;
4104             }
4105         }
4106     }
4107 
4108     if (nsops > TARGET_SEMOPM) {
4109         return -TARGET_E2BIG;
4110     }
4111 
4112     sops = g_new(struct sembuf, nsops);
4113 
4114     if (target_to_host_sembuf(sops, ptr, nsops)) {
4115         g_free(sops);
4116         return -TARGET_EFAULT;
4117     }
4118 
4119     ret = -TARGET_ENOSYS;
4120 #ifdef __NR_semtimedop
4121     ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
4122 #endif
4123 #ifdef __NR_ipc
4124     if (ret == -TARGET_ENOSYS) {
4125         ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
4126                                  SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
4127     }
4128 #endif
4129     g_free(sops);
4130     return ret;
4131 }
4132 #endif
4133 
4134 struct target_msqid_ds
4135 {
4136     struct target_ipc_perm msg_perm;
4137     abi_ulong msg_stime;
4138 #if TARGET_ABI_BITS == 32
4139     abi_ulong __unused1;
4140 #endif
4141     abi_ulong msg_rtime;
4142 #if TARGET_ABI_BITS == 32
4143     abi_ulong __unused2;
4144 #endif
4145     abi_ulong msg_ctime;
4146 #if TARGET_ABI_BITS == 32
4147     abi_ulong __unused3;
4148 #endif
4149     abi_ulong __msg_cbytes;
4150     abi_ulong msg_qnum;
4151     abi_ulong msg_qbytes;
4152     abi_ulong msg_lspid;
4153     abi_ulong msg_lrpid;
4154     abi_ulong __unused4;
4155     abi_ulong __unused5;
4156 };
4157 
4158 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4159                                                abi_ulong target_addr)
4160 {
4161     struct target_msqid_ds *target_md;
4162 
4163     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4164         return -TARGET_EFAULT;
4165     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4166         return -TARGET_EFAULT;
4167     host_md->msg_stime = tswapal(target_md->msg_stime);
4168     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4169     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4170     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4171     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4172     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4173     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4174     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4175     unlock_user_struct(target_md, target_addr, 0);
4176     return 0;
4177 }
4178 
4179 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4180                                                struct msqid_ds *host_md)
4181 {
4182     struct target_msqid_ds *target_md;
4183 
4184     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4185         return -TARGET_EFAULT;
4186     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4187         return -TARGET_EFAULT;
4188     target_md->msg_stime = tswapal(host_md->msg_stime);
4189     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4190     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4191     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4192     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4193     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4194     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4195     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4196     unlock_user_struct(target_md, target_addr, 1);
4197     return 0;
4198 }
4199 
4200 struct target_msginfo {
4201     int msgpool;
4202     int msgmap;
4203     int msgmax;
4204     int msgmnb;
4205     int msgmni;
4206     int msgssz;
4207     int msgtql;
4208     unsigned short int msgseg;
4209 };
4210 
4211 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4212                                               struct msginfo *host_msginfo)
4213 {
4214     struct target_msginfo *target_msginfo;
4215     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4216         return -TARGET_EFAULT;
4217     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4218     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4219     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4220     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4221     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4222     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4223     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4224     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4225     unlock_user_struct(target_msginfo, target_addr, 1);
4226     return 0;
4227 }
4228 
4229 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4230 {
4231     struct msqid_ds dsarg;
4232     struct msginfo msginfo;
4233     abi_long ret = -TARGET_EINVAL;
4234 
4235     cmd &= 0xff;
4236 
4237     switch (cmd) {
4238     case IPC_STAT:
4239     case IPC_SET:
4240     case MSG_STAT:
4241         if (target_to_host_msqid_ds(&dsarg,ptr))
4242             return -TARGET_EFAULT;
4243         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4244         if (host_to_target_msqid_ds(ptr,&dsarg))
4245             return -TARGET_EFAULT;
4246         break;
4247     case IPC_RMID:
4248         ret = get_errno(msgctl(msgid, cmd, NULL));
4249         break;
4250     case IPC_INFO:
4251     case MSG_INFO:
4252         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4253         if (host_to_target_msginfo(ptr, &msginfo))
4254             return -TARGET_EFAULT;
4255         break;
4256     }
4257 
4258     return ret;
4259 }
4260 
4261 struct target_msgbuf {
4262     abi_long mtype;
4263     char	mtext[1];
4264 };
4265 
4266 static inline abi_long do_msgsnd(int msqid, abi_long msgp,
4267                                  ssize_t msgsz, int msgflg)
4268 {
4269     struct target_msgbuf *target_mb;
4270     struct msgbuf *host_mb;
4271     abi_long ret = 0;
4272 
4273     if (msgsz < 0) {
4274         return -TARGET_EINVAL;
4275     }
4276 
4277     if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
4278         return -TARGET_EFAULT;
4279     host_mb = g_try_malloc(msgsz + sizeof(long));
4280     if (!host_mb) {
4281         unlock_user_struct(target_mb, msgp, 0);
4282         return -TARGET_ENOMEM;
4283     }
4284     host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
4285     memcpy(host_mb->mtext, target_mb->mtext, msgsz);
4286     ret = -TARGET_ENOSYS;
4287 #ifdef __NR_msgsnd
4288     ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
4289 #endif
4290 #ifdef __NR_ipc
4291     if (ret == -TARGET_ENOSYS) {
4292 #ifdef __s390x__
4293         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4294                                  host_mb));
4295 #else
4296         ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
4297                                  host_mb, 0));
4298 #endif
4299     }
4300 #endif
4301     g_free(host_mb);
4302     unlock_user_struct(target_mb, msgp, 0);
4303 
4304     return ret;
4305 }
4306 
4307 #ifdef __NR_ipc
4308 #if defined(__sparc__)
4309 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4310 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4311 #elif defined(__s390x__)
4312 /* The s390 sys_ipc variant has only five parameters.  */
4313 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4314     ((long int[]){(long int)__msgp, __msgtyp})
4315 #else
4316 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4317     ((long int[]){(long int)__msgp, __msgtyp}), 0
4318 #endif
4319 #endif
4320 
4321 static inline abi_long do_msgrcv(int msqid, abi_long msgp,
4322                                  ssize_t msgsz, abi_long msgtyp,
4323                                  int msgflg)
4324 {
4325     struct target_msgbuf *target_mb;
4326     char *target_mtext;
4327     struct msgbuf *host_mb;
4328     abi_long ret = 0;
4329 
4330     if (msgsz < 0) {
4331         return -TARGET_EINVAL;
4332     }
4333 
4334     if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
4335         return -TARGET_EFAULT;
4336 
4337     host_mb = g_try_malloc(msgsz + sizeof(long));
4338     if (!host_mb) {
4339         ret = -TARGET_ENOMEM;
4340         goto end;
4341     }
4342     ret = -TARGET_ENOSYS;
4343 #ifdef __NR_msgrcv
4344     ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
4345 #endif
4346 #ifdef __NR_ipc
4347     if (ret == -TARGET_ENOSYS) {
4348         ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
4349                         msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
4350     }
4351 #endif
4352 
4353     if (ret > 0) {
4354         abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
4355         target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
4356         if (!target_mtext) {
4357             ret = -TARGET_EFAULT;
4358             goto end;
4359         }
4360         memcpy(target_mb->mtext, host_mb->mtext, ret);
4361         unlock_user(target_mtext, target_mtext_addr, ret);
4362     }
4363 
4364     target_mb->mtype = tswapal(host_mb->mtype);
4365 
4366 end:
4367     if (target_mb)
4368         unlock_user_struct(target_mb, msgp, 1);
4369     g_free(host_mb);
4370     return ret;
4371 }
4372 
4373 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4374                                                abi_ulong target_addr)
4375 {
4376     struct target_shmid_ds *target_sd;
4377 
4378     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4379         return -TARGET_EFAULT;
4380     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4381         return -TARGET_EFAULT;
4382     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4383     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4384     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4385     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4386     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4387     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4388     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4389     unlock_user_struct(target_sd, target_addr, 0);
4390     return 0;
4391 }
4392 
4393 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4394                                                struct shmid_ds *host_sd)
4395 {
4396     struct target_shmid_ds *target_sd;
4397 
4398     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4399         return -TARGET_EFAULT;
4400     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4401         return -TARGET_EFAULT;
4402     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4403     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4404     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4405     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4406     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4407     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4408     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4409     unlock_user_struct(target_sd, target_addr, 1);
4410     return 0;
4411 }
4412 
4413 struct  target_shminfo {
4414     abi_ulong shmmax;
4415     abi_ulong shmmin;
4416     abi_ulong shmmni;
4417     abi_ulong shmseg;
4418     abi_ulong shmall;
4419 };
4420 
4421 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4422                                               struct shminfo *host_shminfo)
4423 {
4424     struct target_shminfo *target_shminfo;
4425     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4426         return -TARGET_EFAULT;
4427     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4428     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4429     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4430     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4431     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4432     unlock_user_struct(target_shminfo, target_addr, 1);
4433     return 0;
4434 }
4435 
4436 struct target_shm_info {
4437     int used_ids;
4438     abi_ulong shm_tot;
4439     abi_ulong shm_rss;
4440     abi_ulong shm_swp;
4441     abi_ulong swap_attempts;
4442     abi_ulong swap_successes;
4443 };
4444 
4445 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4446                                                struct shm_info *host_shm_info)
4447 {
4448     struct target_shm_info *target_shm_info;
4449     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4450         return -TARGET_EFAULT;
4451     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4452     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4453     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4454     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4455     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4456     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4457     unlock_user_struct(target_shm_info, target_addr, 1);
4458     return 0;
4459 }
4460 
4461 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4462 {
4463     struct shmid_ds dsarg;
4464     struct shminfo shminfo;
4465     struct shm_info shm_info;
4466     abi_long ret = -TARGET_EINVAL;
4467 
4468     cmd &= 0xff;
4469 
4470     switch(cmd) {
4471     case IPC_STAT:
4472     case IPC_SET:
4473     case SHM_STAT:
4474         if (target_to_host_shmid_ds(&dsarg, buf))
4475             return -TARGET_EFAULT;
4476         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4477         if (host_to_target_shmid_ds(buf, &dsarg))
4478             return -TARGET_EFAULT;
4479         break;
4480     case IPC_INFO:
4481         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4482         if (host_to_target_shminfo(buf, &shminfo))
4483             return -TARGET_EFAULT;
4484         break;
4485     case SHM_INFO:
4486         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4487         if (host_to_target_shm_info(buf, &shm_info))
4488             return -TARGET_EFAULT;
4489         break;
4490     case IPC_RMID:
4491     case SHM_LOCK:
4492     case SHM_UNLOCK:
4493         ret = get_errno(shmctl(shmid, cmd, NULL));
4494         break;
4495     }
4496 
4497     return ret;
4498 }
4499 
4500 #ifdef TARGET_NR_ipc
4501 /* ??? This only works with linear mappings.  */
4502 /* do_ipc() must return target values and target errnos. */
4503 static abi_long do_ipc(CPUArchState *cpu_env,
4504                        unsigned int call, abi_long first,
4505                        abi_long second, abi_long third,
4506                        abi_long ptr, abi_long fifth)
4507 {
4508     int version;
4509     abi_long ret = 0;
4510 
4511     version = call >> 16;
4512     call &= 0xffff;
4513 
4514     switch (call) {
4515     case IPCOP_semop:
4516         ret = do_semtimedop(first, ptr, second, 0, false);
4517         break;
4518     case IPCOP_semtimedop:
4519     /*
4520      * The s390 sys_ipc variant has only five parameters instead of six
4521      * (as for default variant) and the only difference is the handling of
4522      * SEMTIMEDOP where on s390 the third parameter is used as a pointer
4523      * to a struct timespec where the generic variant uses fifth parameter.
4524      */
4525 #if defined(TARGET_S390X)
4526         ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
4527 #else
4528         ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
4529 #endif
4530         break;
4531 
4532     case IPCOP_semget:
4533         ret = get_errno(semget(first, second, third));
4534         break;
4535 
4536     case IPCOP_semctl: {
4537         /* The semun argument to semctl is passed by value, so dereference the
4538          * ptr argument. */
4539         abi_ulong atptr;
4540         get_user_ual(atptr, ptr);
4541         ret = do_semctl(first, second, third, atptr);
4542         break;
4543     }
4544 
4545     case IPCOP_msgget:
4546         ret = get_errno(msgget(first, second));
4547         break;
4548 
4549     case IPCOP_msgsnd:
4550         ret = do_msgsnd(first, ptr, second, third);
4551         break;
4552 
4553     case IPCOP_msgctl:
4554         ret = do_msgctl(first, second, ptr);
4555         break;
4556 
4557     case IPCOP_msgrcv:
4558         switch (version) {
4559         case 0:
4560             {
4561                 struct target_ipc_kludge {
4562                     abi_long msgp;
4563                     abi_long msgtyp;
4564                 } *tmp;
4565 
4566                 if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
4567                     ret = -TARGET_EFAULT;
4568                     break;
4569                 }
4570 
4571                 ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
4572 
4573                 unlock_user_struct(tmp, ptr, 0);
4574                 break;
4575             }
4576         default:
4577             ret = do_msgrcv(first, ptr, second, fifth, third);
4578         }
4579         break;
4580 
4581     case IPCOP_shmat:
4582         switch (version) {
4583         default:
4584         {
4585             abi_ulong raddr;
4586             raddr = target_shmat(cpu_env, first, ptr, second);
4587             if (is_error(raddr))
4588                 return get_errno(raddr);
4589             if (put_user_ual(raddr, third))
4590                 return -TARGET_EFAULT;
4591             break;
4592         }
4593         case 1:
4594             ret = -TARGET_EINVAL;
4595             break;
4596         }
4597 	break;
4598     case IPCOP_shmdt:
4599         ret = target_shmdt(ptr);
4600 	break;
4601 
4602     case IPCOP_shmget:
4603 	/* IPC_* flag values are the same on all linux platforms */
4604 	ret = get_errno(shmget(first, second, third));
4605 	break;
4606 
4607 	/* IPC_* and SHM_* command values are the same on all linux platforms */
4608     case IPCOP_shmctl:
4609         ret = do_shmctl(first, second, ptr);
4610         break;
4611     default:
4612         qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
4613                       call, version);
4614 	ret = -TARGET_ENOSYS;
4615 	break;
4616     }
4617     return ret;
4618 }
4619 #endif
4620 
4621 /* kernel structure types definitions */
4622 
4623 #define STRUCT(name, ...) STRUCT_ ## name,
4624 #define STRUCT_SPECIAL(name) STRUCT_ ## name,
4625 enum {
4626 #include "syscall_types.h"
4627 STRUCT_MAX
4628 };
4629 #undef STRUCT
4630 #undef STRUCT_SPECIAL
4631 
4632 #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
4633 #define STRUCT_SPECIAL(name)
4634 #include "syscall_types.h"
4635 #undef STRUCT
4636 #undef STRUCT_SPECIAL
4637 
4638 #define MAX_STRUCT_SIZE 4096
4639 
4640 #ifdef CONFIG_FIEMAP
4641 /* So fiemap access checks don't overflow on 32 bit systems.
4642  * This is very slightly smaller than the limit imposed by
4643  * the underlying kernel.
4644  */
4645 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4646                             / sizeof(struct fiemap_extent))
4647 
4648 static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
4649                                        int fd, int cmd, abi_long arg)
4650 {
4651     /* The parameter for this ioctl is a struct fiemap followed
4652      * by an array of struct fiemap_extent whose size is set
4653      * in fiemap->fm_extent_count. The array is filled in by the
4654      * ioctl.
4655      */
4656     int target_size_in, target_size_out;
4657     struct fiemap *fm;
4658     const argtype *arg_type = ie->arg_type;
4659     const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
4660     void *argptr, *p;
4661     abi_long ret;
4662     int i, extent_size = thunk_type_size(extent_arg_type, 0);
4663     uint32_t outbufsz;
4664     int free_fm = 0;
4665 
4666     assert(arg_type[0] == TYPE_PTR);
4667     assert(ie->access == IOC_RW);
4668     arg_type++;
4669     target_size_in = thunk_type_size(arg_type, 0);
4670     argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
4671     if (!argptr) {
4672         return -TARGET_EFAULT;
4673     }
4674     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4675     unlock_user(argptr, arg, 0);
4676     fm = (struct fiemap *)buf_temp;
4677     if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
4678         return -TARGET_EINVAL;
4679     }
4680 
4681     outbufsz = sizeof (*fm) +
4682         (sizeof(struct fiemap_extent) * fm->fm_extent_count);
4683 
4684     if (outbufsz > MAX_STRUCT_SIZE) {
4685         /* We can't fit all the extents into the fixed size buffer.
4686          * Allocate one that is large enough and use it instead.
4687          */
4688         fm = g_try_malloc(outbufsz);
4689         if (!fm) {
4690             return -TARGET_ENOMEM;
4691         }
4692         memcpy(fm, buf_temp, sizeof(struct fiemap));
4693         free_fm = 1;
4694     }
4695     ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
4696     if (!is_error(ret)) {
4697         target_size_out = target_size_in;
4698         /* An extent_count of 0 means we were only counting the extents
4699          * so there are no structs to copy
4700          */
4701         if (fm->fm_extent_count != 0) {
4702             target_size_out += fm->fm_mapped_extents * extent_size;
4703         }
4704         argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
4705         if (!argptr) {
4706             ret = -TARGET_EFAULT;
4707         } else {
4708             /* Convert the struct fiemap */
4709             thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
4710             if (fm->fm_extent_count != 0) {
4711                 p = argptr + target_size_in;
4712                 /* ...and then all the struct fiemap_extents */
4713                 for (i = 0; i < fm->fm_mapped_extents; i++) {
4714                     thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
4715                                   THUNK_TARGET);
4716                     p += extent_size;
4717                 }
4718             }
4719             unlock_user(argptr, arg, target_size_out);
4720         }
4721     }
4722     if (free_fm) {
4723         g_free(fm);
4724     }
4725     return ret;
4726 }
4727 #endif
4728 
4729 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4730                                 int fd, int cmd, abi_long arg)
4731 {
4732     const argtype *arg_type = ie->arg_type;
4733     int target_size;
4734     void *argptr;
4735     int ret;
4736     struct ifconf *host_ifconf;
4737     uint32_t outbufsz;
4738     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4739     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4740     int target_ifreq_size;
4741     int nb_ifreq;
4742     int free_buf = 0;
4743     int i;
4744     int target_ifc_len;
4745     abi_long target_ifc_buf;
4746     int host_ifc_len;
4747     char *host_ifc_buf;
4748 
4749     assert(arg_type[0] == TYPE_PTR);
4750     assert(ie->access == IOC_RW);
4751 
4752     arg_type++;
4753     target_size = thunk_type_size(arg_type, 0);
4754 
4755     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4756     if (!argptr)
4757         return -TARGET_EFAULT;
4758     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4759     unlock_user(argptr, arg, 0);
4760 
4761     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4762     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4763     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4764 
4765     if (target_ifc_buf != 0) {
4766         target_ifc_len = host_ifconf->ifc_len;
4767         nb_ifreq = target_ifc_len / target_ifreq_size;
4768         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4769 
4770         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4771         if (outbufsz > MAX_STRUCT_SIZE) {
4772             /*
4773              * We can't fit all the extents into the fixed size buffer.
4774              * Allocate one that is large enough and use it instead.
4775              */
4776             host_ifconf = g_try_malloc(outbufsz);
4777             if (!host_ifconf) {
4778                 return -TARGET_ENOMEM;
4779             }
4780             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4781             free_buf = 1;
4782         }
4783         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4784 
4785         host_ifconf->ifc_len = host_ifc_len;
4786     } else {
4787       host_ifc_buf = NULL;
4788     }
4789     host_ifconf->ifc_buf = host_ifc_buf;
4790 
4791     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4792     if (!is_error(ret)) {
4793 	/* convert host ifc_len to target ifc_len */
4794 
4795         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4796         target_ifc_len = nb_ifreq * target_ifreq_size;
4797         host_ifconf->ifc_len = target_ifc_len;
4798 
4799 	/* restore target ifc_buf */
4800 
4801         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4802 
4803 	/* copy struct ifconf to target user */
4804 
4805         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4806         if (!argptr)
4807             return -TARGET_EFAULT;
4808         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4809         unlock_user(argptr, arg, target_size);
4810 
4811         if (target_ifc_buf != 0) {
4812             /* copy ifreq[] to target user */
4813             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4814             for (i = 0; i < nb_ifreq ; i++) {
4815                 thunk_convert(argptr + i * target_ifreq_size,
4816                               host_ifc_buf + i * sizeof(struct ifreq),
4817                               ifreq_arg_type, THUNK_TARGET);
4818             }
4819             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4820         }
4821     }
4822 
4823     if (free_buf) {
4824         g_free(host_ifconf);
4825     }
4826 
4827     return ret;
4828 }
4829 
4830 #if defined(CONFIG_USBFS)
4831 #if HOST_LONG_BITS > 64
4832 #error USBDEVFS thunks do not support >64 bit hosts yet.
4833 #endif
4834 struct live_urb {
4835     uint64_t target_urb_adr;
4836     uint64_t target_buf_adr;
4837     char *target_buf_ptr;
4838     struct usbdevfs_urb host_urb;
4839 };
4840 
4841 static GHashTable *usbdevfs_urb_hashtable(void)
4842 {
4843     static GHashTable *urb_hashtable;
4844 
4845     if (!urb_hashtable) {
4846         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4847     }
4848     return urb_hashtable;
4849 }
4850 
4851 static void urb_hashtable_insert(struct live_urb *urb)
4852 {
4853     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4854     g_hash_table_insert(urb_hashtable, urb, urb);
4855 }
4856 
4857 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4858 {
4859     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4860     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4861 }
4862 
4863 static void urb_hashtable_remove(struct live_urb *urb)
4864 {
4865     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4866     g_hash_table_remove(urb_hashtable, urb);
4867 }
4868 
4869 static abi_long
4870 do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
4871                           int fd, int cmd, abi_long arg)
4872 {
4873     const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
4874     const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
4875     struct live_urb *lurb;
4876     void *argptr;
4877     uint64_t hurb;
4878     int target_size;
4879     uintptr_t target_urb_adr;
4880     abi_long ret;
4881 
4882     target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);
4883 
4884     memset(buf_temp, 0, sizeof(uint64_t));
4885     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
4886     if (is_error(ret)) {
4887         return ret;
4888     }
4889 
4890     memcpy(&hurb, buf_temp, sizeof(uint64_t));
4891     lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
4892     if (!lurb->target_urb_adr) {
4893         return -TARGET_EFAULT;
4894     }
4895     urb_hashtable_remove(lurb);
4896     unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
4897         lurb->host_urb.buffer_length);
4898     lurb->target_buf_ptr = NULL;
4899 
4900     /* restore the guest buffer pointer */
4901     lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;
4902 
4903     /* update the guest urb struct */
4904     argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
4905     if (!argptr) {
4906         g_free(lurb);
4907         return -TARGET_EFAULT;
4908     }
4909     thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
4910     unlock_user(argptr, lurb->target_urb_adr, target_size);
4911 
4912     target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
4913     /* write back the urb handle */
4914     argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4915     if (!argptr) {
4916         g_free(lurb);
4917         return -TARGET_EFAULT;
4918     }
4919 
4920     /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
4921     target_urb_adr = lurb->target_urb_adr;
4922     thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
4923     unlock_user(argptr, arg, target_size);
4924 
4925     g_free(lurb);
4926     return ret;
4927 }
4928 
4929 static abi_long
4930 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4931                              uint8_t *buf_temp __attribute__((unused)),
4932                              int fd, int cmd, abi_long arg)
4933 {
4934     struct live_urb *lurb;
4935 
4936     /* map target address back to host URB with metadata. */
4937     lurb = urb_hashtable_lookup(arg);
4938     if (!lurb) {
4939         return -TARGET_EFAULT;
4940     }
4941     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4942 }
4943 
4944 static abi_long
4945 do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
4946                             int fd, int cmd, abi_long arg)
4947 {
4948     const argtype *arg_type = ie->arg_type;
4949     int target_size;
4950     abi_long ret;
4951     void *argptr;
4952     int rw_dir;
4953     struct live_urb *lurb;
4954 
4955     /*
4956      * each submitted URB needs to map to a unique ID for the
4957      * kernel, and that unique ID needs to be a pointer to
4958      * host memory.  hence, we need to malloc for each URB.
4959      * isochronous transfers have a variable length struct.
4960      */
4961     arg_type++;
4962     target_size = thunk_type_size(arg_type, THUNK_TARGET);
4963 
4964     /* construct host copy of urb and metadata */
4965     lurb = g_try_new0(struct live_urb, 1);
4966     if (!lurb) {
4967         return -TARGET_ENOMEM;
4968     }
4969 
4970     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4971     if (!argptr) {
4972         g_free(lurb);
4973         return -TARGET_EFAULT;
4974     }
4975     thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
4976     unlock_user(argptr, arg, 0);
4977 
4978     lurb->target_urb_adr = arg;
4979     lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;
4980 
4981     /* buffer space used depends on endpoint type so lock the entire buffer */
4982     /* control type urbs should check the buffer contents for true direction */
4983     rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
4984     lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
4985         lurb->host_urb.buffer_length, 1);
4986     if (lurb->target_buf_ptr == NULL) {
4987         g_free(lurb);
4988         return -TARGET_EFAULT;
4989     }
4990 
4991     /* update buffer pointer in host copy */
4992     lurb->host_urb.buffer = lurb->target_buf_ptr;
4993 
4994     ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4995     if (is_error(ret)) {
4996         unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
4997         g_free(lurb);
4998     } else {
4999         urb_hashtable_insert(lurb);
5000     }
5001 
5002     return ret;
5003 }
5004 #endif /* CONFIG_USBFS */
5005 
5006 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5007                             int cmd, abi_long arg)
5008 {
5009     void *argptr;
5010     struct dm_ioctl *host_dm;
5011     abi_long guest_data;
5012     uint32_t guest_data_size;
5013     int target_size;
5014     const argtype *arg_type = ie->arg_type;
5015     abi_long ret;
5016     void *big_buf = NULL;
5017     char *host_data;
5018 
5019     arg_type++;
5020     target_size = thunk_type_size(arg_type, 0);
5021     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5022     if (!argptr) {
5023         ret = -TARGET_EFAULT;
5024         goto out;
5025     }
5026     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5027     unlock_user(argptr, arg, 0);
5028 
5029     /* buf_temp is too small, so fetch things into a bigger buffer */
5030     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5031     memcpy(big_buf, buf_temp, target_size);
5032     buf_temp = big_buf;
5033     host_dm = big_buf;
5034 
5035     guest_data = arg + host_dm->data_start;
5036     if ((guest_data - arg) < 0) {
5037         ret = -TARGET_EINVAL;
5038         goto out;
5039     }
5040     guest_data_size = host_dm->data_size - host_dm->data_start;
5041     host_data = (char*)host_dm + host_dm->data_start;
5042 
5043     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5044     if (!argptr) {
5045         ret = -TARGET_EFAULT;
5046         goto out;
5047     }
5048 
5049     switch (ie->host_cmd) {
5050     case DM_REMOVE_ALL:
5051     case DM_LIST_DEVICES:
5052     case DM_DEV_CREATE:
5053     case DM_DEV_REMOVE:
5054     case DM_DEV_SUSPEND:
5055     case DM_DEV_STATUS:
5056     case DM_DEV_WAIT:
5057     case DM_TABLE_STATUS:
5058     case DM_TABLE_CLEAR:
5059     case DM_TABLE_DEPS:
5060     case DM_LIST_VERSIONS:
5061         /* no input data */
5062         break;
5063     case DM_DEV_RENAME:
5064     case DM_DEV_SET_GEOMETRY:
5065         /* data contains only strings */
5066         memcpy(host_data, argptr, guest_data_size);
5067         break;
5068     case DM_TARGET_MSG:
5069         memcpy(host_data, argptr, guest_data_size);
5070         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5071         break;
5072     case DM_TABLE_LOAD:
5073     {
5074         void *gspec = argptr;
5075         void *cur_data = host_data;
5076         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5077         int spec_size = thunk_type_size(dm_arg_type, 0);
5078         int i;
5079 
5080         for (i = 0; i < host_dm->target_count; i++) {
5081             struct dm_target_spec *spec = cur_data;
5082             uint32_t next;
5083             int slen;
5084 
5085             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5086             slen = strlen((char*)gspec + spec_size) + 1;
5087             next = spec->next;
5088             spec->next = sizeof(*spec) + slen;
5089             strcpy((char*)&spec[1], gspec + spec_size);
5090             gspec += next;
5091             cur_data += spec->next;
5092         }
5093         break;
5094     }
5095     default:
5096         ret = -TARGET_EINVAL;
5097         unlock_user(argptr, guest_data, 0);
5098         goto out;
5099     }
5100     unlock_user(argptr, guest_data, 0);
5101 
5102     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5103     if (!is_error(ret)) {
5104         guest_data = arg + host_dm->data_start;
5105         guest_data_size = host_dm->data_size - host_dm->data_start;
5106         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5107         switch (ie->host_cmd) {
5108         case DM_REMOVE_ALL:
5109         case DM_DEV_CREATE:
5110         case DM_DEV_REMOVE:
5111         case DM_DEV_RENAME:
5112         case DM_DEV_SUSPEND:
5113         case DM_DEV_STATUS:
5114         case DM_TABLE_LOAD:
5115         case DM_TABLE_CLEAR:
5116         case DM_TARGET_MSG:
5117         case DM_DEV_SET_GEOMETRY:
5118             /* no return data */
5119             break;
5120         case DM_LIST_DEVICES:
5121         {
5122             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5123             uint32_t remaining_data = guest_data_size;
5124             void *cur_data = argptr;
5125             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5126             int nl_size = 12; /* can't use thunk_size due to alignment */
5127 
5128             while (1) {
5129                 uint32_t next = nl->next;
5130                 if (next) {
5131                     nl->next = nl_size + (strlen(nl->name) + 1);
5132                 }
5133                 if (remaining_data < nl->next) {
5134                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5135                     break;
5136                 }
5137                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5138                 strcpy(cur_data + nl_size, nl->name);
5139                 cur_data += nl->next;
5140                 remaining_data -= nl->next;
5141                 if (!next) {
5142                     break;
5143                 }
5144                 nl = (void*)nl + next;
5145             }
5146             break;
5147         }
5148         case DM_DEV_WAIT:
5149         case DM_TABLE_STATUS:
5150         {
5151             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5152             void *cur_data = argptr;
5153             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5154             int spec_size = thunk_type_size(dm_arg_type, 0);
5155             int i;
5156 
5157             for (i = 0; i < host_dm->target_count; i++) {
5158                 uint32_t next = spec->next;
5159                 int slen = strlen((char*)&spec[1]) + 1;
5160                 spec->next = (cur_data - argptr) + spec_size + slen;
5161                 if (guest_data_size < spec->next) {
5162                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5163                     break;
5164                 }
5165                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5166                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5167                 cur_data = argptr + spec->next;
5168                 spec = (void*)host_dm + host_dm->data_start + next;
5169             }
5170             break;
5171         }
5172         case DM_TABLE_DEPS:
5173         {
5174             void *hdata = (void*)host_dm + host_dm->data_start;
5175             int count = *(uint32_t*)hdata;
5176             uint64_t *hdev = hdata + 8;
5177             uint64_t *gdev = argptr + 8;
5178             int i;
5179 
5180             *(uint32_t*)argptr = tswap32(count);
5181             for (i = 0; i < count; i++) {
5182                 *gdev = tswap64(*hdev);
5183                 gdev++;
5184                 hdev++;
5185             }
5186             break;
5187         }
5188         case DM_LIST_VERSIONS:
5189         {
5190             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5191             uint32_t remaining_data = guest_data_size;
5192             void *cur_data = argptr;
5193             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5194             int vers_size = thunk_type_size(dm_arg_type, 0);
5195 
5196             while (1) {
5197                 uint32_t next = vers->next;
5198                 if (next) {
5199                     vers->next = vers_size + (strlen(vers->name) + 1);
5200                 }
5201                 if (remaining_data < vers->next) {
5202                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5203                     break;
5204                 }
5205                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5206                 strcpy(cur_data + vers_size, vers->name);
5207                 cur_data += vers->next;
5208                 remaining_data -= vers->next;
5209                 if (!next) {
5210                     break;
5211                 }
5212                 vers = (void*)vers + next;
5213             }
5214             break;
5215         }
5216         default:
5217             unlock_user(argptr, guest_data, 0);
5218             ret = -TARGET_EINVAL;
5219             goto out;
5220         }
5221         unlock_user(argptr, guest_data, guest_data_size);
5222 
5223         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5224         if (!argptr) {
5225             ret = -TARGET_EFAULT;
5226             goto out;
5227         }
5228         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5229         unlock_user(argptr, arg, target_size);
5230     }
5231 out:
5232     g_free(big_buf);
5233     return ret;
5234 }
5235 
5236 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5237                                int cmd, abi_long arg)
5238 {
5239     void *argptr;
5240     int target_size;
5241     const argtype *arg_type = ie->arg_type;
5242     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5243     abi_long ret;
5244 
5245     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5246     struct blkpg_partition host_part;
5247 
5248     /* Read and convert blkpg */
5249     arg_type++;
5250     target_size = thunk_type_size(arg_type, 0);
5251     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5252     if (!argptr) {
5253         ret = -TARGET_EFAULT;
5254         goto out;
5255     }
5256     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5257     unlock_user(argptr, arg, 0);
5258 
5259     switch (host_blkpg->op) {
5260     case BLKPG_ADD_PARTITION:
5261     case BLKPG_DEL_PARTITION:
5262         /* payload is struct blkpg_partition */
5263         break;
5264     default:
5265         /* Unknown opcode */
5266         ret = -TARGET_EINVAL;
5267         goto out;
5268     }
5269 
5270     /* Read and convert blkpg->data */
5271     arg = (abi_long)(uintptr_t)host_blkpg->data;
5272     target_size = thunk_type_size(part_arg_type, 0);
5273     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5274     if (!argptr) {
5275         ret = -TARGET_EFAULT;
5276         goto out;
5277     }
5278     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5279     unlock_user(argptr, arg, 0);
5280 
5281     /* Swizzle the data pointer to our local copy and call! */
5282     host_blkpg->data = &host_part;
5283     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5284 
5285 out:
5286     return ret;
5287 }
5288 
5289 static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
5290                                 int fd, int cmd, abi_long arg)
5291 {
5292     const argtype *arg_type = ie->arg_type;
5293     const StructEntry *se;
5294     const argtype *field_types;
5295     const int *dst_offsets, *src_offsets;
5296     int target_size;
5297     void *argptr;
5298     abi_ulong *target_rt_dev_ptr = NULL;
5299     unsigned long *host_rt_dev_ptr = NULL;
5300     abi_long ret;
5301     int i;
5302 
5303     assert(ie->access == IOC_W);
5304     assert(*arg_type == TYPE_PTR);
5305     arg_type++;
5306     assert(*arg_type == TYPE_STRUCT);
5307     target_size = thunk_type_size(arg_type, 0);
5308     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5309     if (!argptr) {
5310         return -TARGET_EFAULT;
5311     }
5312     arg_type++;
5313     assert(*arg_type == (int)STRUCT_rtentry);
5314     se = struct_entries + *arg_type++;
5315     assert(se->convert[0] == NULL);
5316     /* convert struct here to be able to catch rt_dev string */
5317     field_types = se->field_types;
5318     dst_offsets = se->field_offsets[THUNK_HOST];
5319     src_offsets = se->field_offsets[THUNK_TARGET];
5320     for (i = 0; i < se->nb_fields; i++) {
5321         if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
5322             assert(*field_types == TYPE_PTRVOID);
5323             target_rt_dev_ptr = argptr + src_offsets[i];
5324             host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
5325             if (*target_rt_dev_ptr != 0) {
5326                 *host_rt_dev_ptr = (unsigned long)lock_user_string(
5327                                                   tswapal(*target_rt_dev_ptr));
5328                 if (!*host_rt_dev_ptr) {
5329                     unlock_user(argptr, arg, 0);
5330                     return -TARGET_EFAULT;
5331                 }
5332             } else {
5333                 *host_rt_dev_ptr = 0;
5334             }
5335             field_types++;
5336             continue;
5337         }
5338         field_types = thunk_convert(buf_temp + dst_offsets[i],
5339                                     argptr + src_offsets[i],
5340                                     field_types, THUNK_HOST);
5341     }
5342     unlock_user(argptr, arg, 0);
5343 
5344     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5345 
5346     assert(host_rt_dev_ptr != NULL);
5347     assert(target_rt_dev_ptr != NULL);
5348     if (*host_rt_dev_ptr != 0) {
5349         unlock_user((void *)*host_rt_dev_ptr,
5350                     *target_rt_dev_ptr, 0);
5351     }
5352     return ret;
5353 }
5354 
5355 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5356                                      int fd, int cmd, abi_long arg)
5357 {
5358     int sig = target_to_host_signal(arg);
5359     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5360 }
5361 
5362 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5363                                     int fd, int cmd, abi_long arg)
5364 {
5365     struct timeval tv;
5366     abi_long ret;
5367 
5368     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5369     if (is_error(ret)) {
5370         return ret;
5371     }
5372 
5373     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5374         if (copy_to_user_timeval(arg, &tv)) {
5375             return -TARGET_EFAULT;
5376         }
5377     } else {
5378         if (copy_to_user_timeval64(arg, &tv)) {
5379             return -TARGET_EFAULT;
5380         }
5381     }
5382 
5383     return ret;
5384 }
5385 
5386 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5387                                       int fd, int cmd, abi_long arg)
5388 {
5389     struct timespec ts;
5390     abi_long ret;
5391 
5392     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5393     if (is_error(ret)) {
5394         return ret;
5395     }
5396 
5397     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5398         if (host_to_target_timespec(arg, &ts)) {
5399             return -TARGET_EFAULT;
5400         }
5401     } else{
5402         if (host_to_target_timespec64(arg, &ts)) {
5403             return -TARGET_EFAULT;
5404         }
5405     }
5406 
5407     return ret;
5408 }
5409 
5410 #ifdef TIOCGPTPEER
5411 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5412                                      int fd, int cmd, abi_long arg)
5413 {
5414     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5415     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5416 }
5417 #endif
5418 
5419 #ifdef HAVE_DRM_H
5420 
5421 static void unlock_drm_version(struct drm_version *host_ver,
5422                                struct target_drm_version *target_ver,
5423                                bool copy)
5424 {
5425     unlock_user(host_ver->name, target_ver->name,
5426                                 copy ? host_ver->name_len : 0);
5427     unlock_user(host_ver->date, target_ver->date,
5428                                 copy ? host_ver->date_len : 0);
5429     unlock_user(host_ver->desc, target_ver->desc,
5430                                 copy ? host_ver->desc_len : 0);
5431 }
5432 
5433 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5434                                           struct target_drm_version *target_ver)
5435 {
5436     memset(host_ver, 0, sizeof(*host_ver));
5437 
5438     __get_user(host_ver->name_len, &target_ver->name_len);
5439     if (host_ver->name_len) {
5440         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5441                                    target_ver->name_len, 0);
5442         if (!host_ver->name) {
5443             return -EFAULT;
5444         }
5445     }
5446 
5447     __get_user(host_ver->date_len, &target_ver->date_len);
5448     if (host_ver->date_len) {
5449         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5450                                    target_ver->date_len, 0);
5451         if (!host_ver->date) {
5452             goto err;
5453         }
5454     }
5455 
5456     __get_user(host_ver->desc_len, &target_ver->desc_len);
5457     if (host_ver->desc_len) {
5458         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5459                                    target_ver->desc_len, 0);
5460         if (!host_ver->desc) {
5461             goto err;
5462         }
5463     }
5464 
5465     return 0;
5466 err:
5467     unlock_drm_version(host_ver, target_ver, false);
5468     return -EFAULT;
5469 }
5470 
5471 static inline void host_to_target_drmversion(
5472                                           struct target_drm_version *target_ver,
5473                                           struct drm_version *host_ver)
5474 {
5475     __put_user(host_ver->version_major, &target_ver->version_major);
5476     __put_user(host_ver->version_minor, &target_ver->version_minor);
5477     __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
5478     __put_user(host_ver->name_len, &target_ver->name_len);
5479     __put_user(host_ver->date_len, &target_ver->date_len);
5480     __put_user(host_ver->desc_len, &target_ver->desc_len);
5481     unlock_drm_version(host_ver, target_ver, true);
5482 }
5483 
5484 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5485                              int fd, int cmd, abi_long arg)
5486 {
5487     struct drm_version *ver;
5488     struct target_drm_version *target_ver;
5489     abi_long ret;
5490 
5491     switch (ie->host_cmd) {
5492     case DRM_IOCTL_VERSION:
5493         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5494             return -TARGET_EFAULT;
5495         }
5496         ver = (struct drm_version *)buf_temp;
5497         ret = target_to_host_drmversion(ver, target_ver);
5498         if (!is_error(ret)) {
5499             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5500             if (is_error(ret)) {
5501                 unlock_drm_version(ver, target_ver, false);
5502             } else {
5503                 host_to_target_drmversion(target_ver, ver);
5504             }
5505         }
5506         unlock_user_struct(target_ver, arg, 0);
5507         return ret;
5508     }
5509     return -TARGET_ENOSYS;
5510 }
5511 
5512 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5513                                            struct drm_i915_getparam *gparam,
5514                                            int fd, abi_long arg)
5515 {
5516     abi_long ret;
5517     int value;
5518     struct target_drm_i915_getparam *target_gparam;
5519 
5520     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5521         return -TARGET_EFAULT;
5522     }
5523 
5524     __get_user(gparam->param, &target_gparam->param);
5525     gparam->value = &value;
5526     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5527     put_user_s32(value, target_gparam->value);
5528 
5529     unlock_user_struct(target_gparam, arg, 0);
5530     return ret;
5531 }
5532 
5533 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5534                                   int fd, int cmd, abi_long arg)
5535 {
5536     switch (ie->host_cmd) {
5537     case DRM_IOCTL_I915_GETPARAM:
5538         return do_ioctl_drm_i915_getparam(ie,
5539                                           (struct drm_i915_getparam *)buf_temp,
5540                                           fd, arg);
5541     default:
5542         return -TARGET_ENOSYS;
5543     }
5544 }
5545 
5546 #endif
5547 
5548 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5549                                         int fd, int cmd, abi_long arg)
5550 {
5551     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5552     struct tun_filter *target_filter;
5553     char *target_addr;
5554 
5555     assert(ie->access == IOC_W);
5556 
5557     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5558     if (!target_filter) {
5559         return -TARGET_EFAULT;
5560     }
5561     filter->flags = tswap16(target_filter->flags);
5562     filter->count = tswap16(target_filter->count);
5563     unlock_user(target_filter, arg, 0);
5564 
5565     if (filter->count) {
5566         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5567             MAX_STRUCT_SIZE) {
5568             return -TARGET_EFAULT;
5569         }
5570 
5571         target_addr = lock_user(VERIFY_READ,
5572                                 arg + offsetof(struct tun_filter, addr),
5573                                 filter->count * ETH_ALEN, 1);
5574         if (!target_addr) {
5575             return -TARGET_EFAULT;
5576         }
5577         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5578         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5579     }
5580 
5581     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5582 }
5583 
5584 IOCTLEntry ioctl_entries[] = {
5585 #define IOCTL(cmd, access, ...) \
5586     { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
5587 #define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
5588     { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
5589 #define IOCTL_IGNORE(cmd) \
5590     { TARGET_ ## cmd, 0, #cmd },
5591 #include "ioctls.h"
5592     { 0, 0, },
5593 };
5594 
5595 /* ??? Implement proper locking for ioctls.  */
5596 /* do_ioctl() Must return target values and target errnos. */
5597 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5598 {
5599     const IOCTLEntry *ie;
5600     const argtype *arg_type;
5601     abi_long ret;
5602     uint8_t buf_temp[MAX_STRUCT_SIZE];
5603     int target_size;
5604     void *argptr;
5605 
5606     ie = ioctl_entries;
5607     for(;;) {
5608         if (ie->target_cmd == 0) {
5609             qemu_log_mask(
5610                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5611             return -TARGET_ENOTTY;
5612         }
5613         if (ie->target_cmd == cmd)
5614             break;
5615         ie++;
5616     }
5617     arg_type = ie->arg_type;
5618     if (ie->do_ioctl) {
5619         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5620     } else if (!ie->host_cmd) {
5621         /* Some architectures define BSD ioctls in their headers
5622            that are not implemented in Linux.  */
5623         return -TARGET_ENOTTY;
5624     }
5625 
5626     switch(arg_type[0]) {
5627     case TYPE_NULL:
5628         /* no argument */
5629         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5630         break;
5631     case TYPE_PTRVOID:
5632     case TYPE_INT:
5633     case TYPE_LONG:
5634     case TYPE_ULONG:
5635         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5636         break;
5637     case TYPE_PTR:
5638         arg_type++;
5639         target_size = thunk_type_size(arg_type, 0);
5640         switch(ie->access) {
5641         case IOC_R:
5642             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5643             if (!is_error(ret)) {
5644                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5645                 if (!argptr)
5646                     return -TARGET_EFAULT;
5647                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5648                 unlock_user(argptr, arg, target_size);
5649             }
5650             break;
5651         case IOC_W:
5652             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5653             if (!argptr)
5654                 return -TARGET_EFAULT;
5655             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5656             unlock_user(argptr, arg, 0);
5657             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5658             break;
5659         default:
5660         case IOC_RW:
5661             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5662             if (!argptr)
5663                 return -TARGET_EFAULT;
5664             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5665             unlock_user(argptr, arg, 0);
5666             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5667             if (!is_error(ret)) {
5668                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5669                 if (!argptr)
5670                     return -TARGET_EFAULT;
5671                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5672                 unlock_user(argptr, arg, target_size);
5673             }
5674             break;
5675         }
5676         break;
5677     default:
5678         qemu_log_mask(LOG_UNIMP,
5679                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5680                       (long)cmd, arg_type[0]);
5681         ret = -TARGET_ENOTTY;
5682         break;
5683     }
5684     return ret;
5685 }
5686 
5687 static const bitmask_transtbl iflag_tbl[] = {
5688         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5689         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5690         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5691         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5692         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5693         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5694         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5695         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5696         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5697         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5698         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5699         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5700         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5701         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5702         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5703 };
5704 
5705 static const bitmask_transtbl oflag_tbl[] = {
5706 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5707 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5708 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5709 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5710 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5711 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5712 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5713 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5714 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5715 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5716 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5717 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5718 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5719 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5720 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5721 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5722 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5723 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5724 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5725 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5726 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5727 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5728 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5729 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5730 };
5731 
5732 static const bitmask_transtbl cflag_tbl[] = {
5733 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5734 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5735 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5736 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5737 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5738 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5739 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5740 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5741 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5742 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5743 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5744 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5745 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5746 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5747 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5748 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5749 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5750 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5751 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5752 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5753 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5754 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5755 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5756 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5757 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5758 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5759 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5760 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5761 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5762 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5763 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5764 };
5765 
5766 static const bitmask_transtbl lflag_tbl[] = {
5767   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5768   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5769   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5770   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5771   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5772   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5773   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5774   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5775   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5776   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5777   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5778   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5779   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5780   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5781   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5782   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5783 };
5784 
5785 static void target_to_host_termios (void *dst, const void *src)
5786 {
5787     struct host_termios *host = dst;
5788     const struct target_termios *target = src;
5789 
5790     host->c_iflag =
5791         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5792     host->c_oflag =
5793         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5794     host->c_cflag =
5795         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5796     host->c_lflag =
5797         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5798     host->c_line = target->c_line;
5799 
5800     memset(host->c_cc, 0, sizeof(host->c_cc));
5801     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5802     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5803     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5804     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5805     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5806     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5807     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5808     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5809     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5810     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5811     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5812     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5813     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5814     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5815     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5816     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5817     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5818 }
5819 
5820 static void host_to_target_termios (void *dst, const void *src)
5821 {
5822     struct target_termios *target = dst;
5823     const struct host_termios *host = src;
5824 
5825     target->c_iflag =
5826         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5827     target->c_oflag =
5828         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5829     target->c_cflag =
5830         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5831     target->c_lflag =
5832         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5833     target->c_line = host->c_line;
5834 
5835     memset(target->c_cc, 0, sizeof(target->c_cc));
5836     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5837     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5838     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5839     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5840     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5841     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5842     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5843     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5844     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5845     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5846     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5847     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5848     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
5849     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
5850     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
5851     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
5852     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
5853 }
5854 
5855 static const StructEntry struct_termios_def = {
5856     .convert = { host_to_target_termios, target_to_host_termios },
5857     .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
5858     .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
5859     .print = print_termios,
5860 };
5861 
5862 /* If the host does not provide these bits, they may be safely discarded. */
5863 #ifndef MAP_SYNC
5864 #define MAP_SYNC 0
5865 #endif
5866 #ifndef MAP_UNINITIALIZED
5867 #define MAP_UNINITIALIZED 0
5868 #endif
5869 
5870 static const bitmask_transtbl mmap_flags_tbl[] = {
5871     { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
5872     { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
5873       MAP_ANONYMOUS, MAP_ANONYMOUS },
5874     { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
5875       MAP_GROWSDOWN, MAP_GROWSDOWN },
5876     { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
5877       MAP_DENYWRITE, MAP_DENYWRITE },
5878     { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
5879       MAP_EXECUTABLE, MAP_EXECUTABLE },
5880     { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
5881     { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
5882       MAP_NORESERVE, MAP_NORESERVE },
5883     { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
5884     /* MAP_STACK had been ignored by the kernel for quite some time.
5885        Recognize it for the target insofar as we do not want to pass
5886        it through to the host.  */
5887     { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
5888     { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
5889     { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
5890     { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
5891       MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
5892     { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
5893       MAP_UNINITIALIZED, MAP_UNINITIALIZED },
5894 };
5895 
5896 /*
5897  * Arrange for legacy / undefined architecture specific flags to be
5898  * ignored by mmap handling code.
5899  */
5900 #ifndef TARGET_MAP_32BIT
5901 #define TARGET_MAP_32BIT 0
5902 #endif
5903 #ifndef TARGET_MAP_HUGE_2MB
5904 #define TARGET_MAP_HUGE_2MB 0
5905 #endif
5906 #ifndef TARGET_MAP_HUGE_1GB
5907 #define TARGET_MAP_HUGE_1GB 0
5908 #endif
5909 
5910 static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
5911                         int target_flags, int fd, off_t offset)
5912 {
5913     /*
5914      * The historical set of flags that all mmap types implicitly support.
5915      */
5916     enum {
5917         TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
5918                                | TARGET_MAP_PRIVATE
5919                                | TARGET_MAP_FIXED
5920                                | TARGET_MAP_ANONYMOUS
5921                                | TARGET_MAP_DENYWRITE
5922                                | TARGET_MAP_EXECUTABLE
5923                                | TARGET_MAP_UNINITIALIZED
5924                                | TARGET_MAP_GROWSDOWN
5925                                | TARGET_MAP_LOCKED
5926                                | TARGET_MAP_NORESERVE
5927                                | TARGET_MAP_POPULATE
5928                                | TARGET_MAP_NONBLOCK
5929                                | TARGET_MAP_STACK
5930                                | TARGET_MAP_HUGETLB
5931                                | TARGET_MAP_32BIT
5932                                | TARGET_MAP_HUGE_2MB
5933                                | TARGET_MAP_HUGE_1GB
5934     };
5935     int host_flags;
5936 
5937     switch (target_flags & TARGET_MAP_TYPE) {
5938     case TARGET_MAP_PRIVATE:
5939         host_flags = MAP_PRIVATE;
5940         break;
5941     case TARGET_MAP_SHARED:
5942         host_flags = MAP_SHARED;
5943         break;
5944     case TARGET_MAP_SHARED_VALIDATE:
5945         /*
5946          * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
5947          * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
5948          */
5949         if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
5950             return -TARGET_EOPNOTSUPP;
5951         }
5952         host_flags = MAP_SHARED_VALIDATE;
5953         if (target_flags & TARGET_MAP_SYNC) {
5954             host_flags |= MAP_SYNC;
5955         }
5956         break;
5957     default:
5958         return -TARGET_EINVAL;
5959     }
5960     host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);
5961 
5962     return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
5963 }
5964 
5965 /*
5966  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5967  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5968  */
5969 #if defined(TARGET_I386)
5970 
5971 /* NOTE: there is really one LDT for all the threads */
5972 static uint8_t *ldt_table;
5973 
5974 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
5975 {
5976     int size;
5977     void *p;
5978 
5979     if (!ldt_table)
5980         return 0;
5981     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
5982     if (size > bytecount)
5983         size = bytecount;
5984     p = lock_user(VERIFY_WRITE, ptr, size, 0);
5985     if (!p)
5986         return -TARGET_EFAULT;
5987     /* ??? Should this by byteswapped?  */
5988     memcpy(p, ldt_table, size);
5989     unlock_user(p, ptr, size);
5990     return size;
5991 }
5992 
5993 /* XXX: add locking support */
5994 static abi_long write_ldt(CPUX86State *env,
5995                           abi_ulong ptr, unsigned long bytecount, int oldmode)
5996 {
5997     struct target_modify_ldt_ldt_s ldt_info;
5998     struct target_modify_ldt_ldt_s *target_ldt_info;
5999     int seg_32bit, contents, read_exec_only, limit_in_pages;
6000     int seg_not_present, useable, lm;
6001     uint32_t *lp, entry_1, entry_2;
6002 
6003     if (bytecount != sizeof(ldt_info))
6004         return -TARGET_EINVAL;
6005     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6006         return -TARGET_EFAULT;
6007     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6008     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6009     ldt_info.limit = tswap32(target_ldt_info->limit);
6010     ldt_info.flags = tswap32(target_ldt_info->flags);
6011     unlock_user_struct(target_ldt_info, ptr, 0);
6012 
6013     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6014         return -TARGET_EINVAL;
6015     seg_32bit = ldt_info.flags & 1;
6016     contents = (ldt_info.flags >> 1) & 3;
6017     read_exec_only = (ldt_info.flags >> 3) & 1;
6018     limit_in_pages = (ldt_info.flags >> 4) & 1;
6019     seg_not_present = (ldt_info.flags >> 5) & 1;
6020     useable = (ldt_info.flags >> 6) & 1;
6021 #ifdef TARGET_ABI32
6022     lm = 0;
6023 #else
6024     lm = (ldt_info.flags >> 7) & 1;
6025 #endif
6026     if (contents == 3) {
6027         if (oldmode)
6028             return -TARGET_EINVAL;
6029         if (seg_not_present == 0)
6030             return -TARGET_EINVAL;
6031     }
6032     /* allocate the LDT */
6033     if (!ldt_table) {
6034         env->ldt.base = target_mmap(0,
6035                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6036                                     PROT_READ|PROT_WRITE,
6037                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6038         if (env->ldt.base == -1)
6039             return -TARGET_ENOMEM;
6040         memset(g2h_untagged(env->ldt.base), 0,
6041                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6042         env->ldt.limit = 0xffff;
6043         ldt_table = g2h_untagged(env->ldt.base);
6044     }
6045 
6046     /* NOTE: same code as Linux kernel */
6047     /* Allow LDTs to be cleared by the user. */
6048     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6049         if (oldmode ||
6050             (contents == 0		&&
6051              read_exec_only == 1	&&
6052              seg_32bit == 0		&&
6053              limit_in_pages == 0	&&
6054              seg_not_present == 1	&&
6055              useable == 0 )) {
6056             entry_1 = 0;
6057             entry_2 = 0;
6058             goto install;
6059         }
6060     }
6061 
6062     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6063         (ldt_info.limit & 0x0ffff);
6064     entry_2 = (ldt_info.base_addr & 0xff000000) |
6065         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6066         (ldt_info.limit & 0xf0000) |
6067         ((read_exec_only ^ 1) << 9) |
6068         (contents << 10) |
6069         ((seg_not_present ^ 1) << 15) |
6070         (seg_32bit << 22) |
6071         (limit_in_pages << 23) |
6072         (lm << 21) |
6073         0x7000;
6074     if (!oldmode)
6075         entry_2 |= (useable << 20);
6076 
6077     /* Install the new entry ...  */
6078 install:
6079     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6080     lp[0] = tswap32(entry_1);
6081     lp[1] = tswap32(entry_2);
6082     return 0;
6083 }
6084 
6085 /* specific and weird i386 syscalls */
6086 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6087                               unsigned long bytecount)
6088 {
6089     abi_long ret;
6090 
6091     switch (func) {
6092     case 0:
6093         ret = read_ldt(ptr, bytecount);
6094         break;
6095     case 1:
6096         ret = write_ldt(env, ptr, bytecount, 1);
6097         break;
6098     case 0x11:
6099         ret = write_ldt(env, ptr, bytecount, 0);
6100         break;
6101     default:
6102         ret = -TARGET_ENOSYS;
6103         break;
6104     }
6105     return ret;
6106 }
6107 
6108 #if defined(TARGET_ABI32)
6109 abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
6110 {
6111     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6112     struct target_modify_ldt_ldt_s ldt_info;
6113     struct target_modify_ldt_ldt_s *target_ldt_info;
6114     int seg_32bit, contents, read_exec_only, limit_in_pages;
6115     int seg_not_present, useable, lm;
6116     uint32_t *lp, entry_1, entry_2;
6117     int i;
6118 
6119     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6120     if (!target_ldt_info)
6121         return -TARGET_EFAULT;
6122     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6123     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6124     ldt_info.limit = tswap32(target_ldt_info->limit);
6125     ldt_info.flags = tswap32(target_ldt_info->flags);
6126     if (ldt_info.entry_number == -1) {
6127         for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
6128             if (gdt_table[i] == 0) {
6129                 ldt_info.entry_number = i;
6130                 target_ldt_info->entry_number = tswap32(i);
6131                 break;
6132             }
6133         }
6134     }
6135     unlock_user_struct(target_ldt_info, ptr, 1);
6136 
6137     if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
6138         ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
6139            return -TARGET_EINVAL;
6140     seg_32bit = ldt_info.flags & 1;
6141     contents = (ldt_info.flags >> 1) & 3;
6142     read_exec_only = (ldt_info.flags >> 3) & 1;
6143     limit_in_pages = (ldt_info.flags >> 4) & 1;
6144     seg_not_present = (ldt_info.flags >> 5) & 1;
6145     useable = (ldt_info.flags >> 6) & 1;
6146 #ifdef TARGET_ABI32
6147     lm = 0;
6148 #else
6149     lm = (ldt_info.flags >> 7) & 1;
6150 #endif
6151 
6152     if (contents == 3) {
6153         if (seg_not_present == 0)
6154             return -TARGET_EINVAL;
6155     }
6156 
6157     /* NOTE: same code as Linux kernel */
6158     /* Allow LDTs to be cleared by the user. */
6159     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6160         if ((contents == 0             &&
6161              read_exec_only == 1       &&
6162              seg_32bit == 0            &&
6163              limit_in_pages == 0       &&
6164              seg_not_present == 1      &&
6165              useable == 0 )) {
6166             entry_1 = 0;
6167             entry_2 = 0;
6168             goto install;
6169         }
6170     }
6171 
6172     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6173         (ldt_info.limit & 0x0ffff);
6174     entry_2 = (ldt_info.base_addr & 0xff000000) |
6175         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6176         (ldt_info.limit & 0xf0000) |
6177         ((read_exec_only ^ 1) << 9) |
6178         (contents << 10) |
6179         ((seg_not_present ^ 1) << 15) |
6180         (seg_32bit << 22) |
6181         (limit_in_pages << 23) |
6182         (useable << 20) |
6183         (lm << 21) |
6184         0x7000;
6185 
6186     /* Install the new entry ...  */
6187 install:
6188     lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
6189     lp[0] = tswap32(entry_1);
6190     lp[1] = tswap32(entry_2);
6191     return 0;
6192 }
6193 
6194 static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
6195 {
6196     struct target_modify_ldt_ldt_s *target_ldt_info;
6197     uint64_t *gdt_table = g2h_untagged(env->gdt.base);
6198     uint32_t base_addr, limit, flags;
6199     int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
6200     int seg_not_present, useable, lm;
6201     uint32_t *lp, entry_1, entry_2;
6202 
6203     lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
6204     if (!target_ldt_info)
6205         return -TARGET_EFAULT;
6206     idx = tswap32(target_ldt_info->entry_number);
6207     if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
6208         idx > TARGET_GDT_ENTRY_TLS_MAX) {
6209         unlock_user_struct(target_ldt_info, ptr, 1);
6210         return -TARGET_EINVAL;
6211     }
6212     lp = (uint32_t *)(gdt_table + idx);
6213     entry_1 = tswap32(lp[0]);
6214     entry_2 = tswap32(lp[1]);
6215 
6216     read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
6217     contents = (entry_2 >> 10) & 3;
6218     seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
6219     seg_32bit = (entry_2 >> 22) & 1;
6220     limit_in_pages = (entry_2 >> 23) & 1;
6221     useable = (entry_2 >> 20) & 1;
6222 #ifdef TARGET_ABI32
6223     lm = 0;
6224 #else
6225     lm = (entry_2 >> 21) & 1;
6226 #endif
6227     flags = (seg_32bit << 0) | (contents << 1) |
6228         (read_exec_only << 3) | (limit_in_pages << 4) |
6229         (seg_not_present << 5) | (useable << 6) | (lm << 7);
6230     limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
6231     base_addr = (entry_1 >> 16) |
6232         (entry_2 & 0xff000000) |
6233         ((entry_2 & 0xff) << 16);
6234     target_ldt_info->base_addr = tswapal(base_addr);
6235     target_ldt_info->limit = tswap32(limit);
6236     target_ldt_info->flags = tswap32(flags);
6237     unlock_user_struct(target_ldt_info, ptr, 1);
6238     return 0;
6239 }
6240 
6241 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6242 {
6243     return -TARGET_ENOSYS;
6244 }
6245 #else
6246 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6247 {
6248     abi_long ret = 0;
6249     abi_ulong val;
6250     int idx;
6251 
6252     switch(code) {
6253     case TARGET_ARCH_SET_GS:
6254     case TARGET_ARCH_SET_FS:
6255         if (code == TARGET_ARCH_SET_GS)
6256             idx = R_GS;
6257         else
6258             idx = R_FS;
6259         cpu_x86_load_seg(env, idx, 0);
6260         env->segs[idx].base = addr;
6261         break;
6262     case TARGET_ARCH_GET_GS:
6263     case TARGET_ARCH_GET_FS:
6264         if (code == TARGET_ARCH_GET_GS)
6265             idx = R_GS;
6266         else
6267             idx = R_FS;
6268         val = env->segs[idx].base;
6269         if (put_user(val, addr, abi_ulong))
6270             ret = -TARGET_EFAULT;
6271         break;
6272     default:
6273         ret = -TARGET_EINVAL;
6274         break;
6275     }
6276     return ret;
6277 }
6278 #endif /* defined(TARGET_ABI32 */
6279 #endif /* defined(TARGET_I386) */
6280 
6281 /*
6282  * These constants are generic.  Supply any that are missing from the host.
6283  */
6284 #ifndef PR_SET_NAME
6285 # define PR_SET_NAME    15
6286 # define PR_GET_NAME    16
6287 #endif
6288 #ifndef PR_SET_FP_MODE
6289 # define PR_SET_FP_MODE 45
6290 # define PR_GET_FP_MODE 46
6291 # define PR_FP_MODE_FR   (1 << 0)
6292 # define PR_FP_MODE_FRE  (1 << 1)
6293 #endif
6294 #ifndef PR_SVE_SET_VL
6295 # define PR_SVE_SET_VL  50
6296 # define PR_SVE_GET_VL  51
6297 # define PR_SVE_VL_LEN_MASK  0xffff
6298 # define PR_SVE_VL_INHERIT   (1 << 17)
6299 #endif
6300 #ifndef PR_PAC_RESET_KEYS
6301 # define PR_PAC_RESET_KEYS  54
6302 # define PR_PAC_APIAKEY   (1 << 0)
6303 # define PR_PAC_APIBKEY   (1 << 1)
6304 # define PR_PAC_APDAKEY   (1 << 2)
6305 # define PR_PAC_APDBKEY   (1 << 3)
6306 # define PR_PAC_APGAKEY   (1 << 4)
6307 #endif
6308 #ifndef PR_SET_TAGGED_ADDR_CTRL
6309 # define PR_SET_TAGGED_ADDR_CTRL 55
6310 # define PR_GET_TAGGED_ADDR_CTRL 56
6311 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6312 #endif
6313 #ifndef PR_SET_IO_FLUSHER
6314 # define PR_SET_IO_FLUSHER 57
6315 # define PR_GET_IO_FLUSHER 58
6316 #endif
6317 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6318 # define PR_SET_SYSCALL_USER_DISPATCH 59
6319 #endif
6320 #ifndef PR_SME_SET_VL
6321 # define PR_SME_SET_VL  63
6322 # define PR_SME_GET_VL  64
6323 # define PR_SME_VL_LEN_MASK  0xffff
6324 # define PR_SME_VL_INHERIT   (1 << 17)
6325 #endif
6326 
6327 #include "target_prctl.h"
6328 
6329 static abi_long do_prctl_inval0(CPUArchState *env)
6330 {
6331     return -TARGET_EINVAL;
6332 }
6333 
6334 static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
6335 {
6336     return -TARGET_EINVAL;
6337 }
6338 
6339 #ifndef do_prctl_get_fp_mode
6340 #define do_prctl_get_fp_mode do_prctl_inval0
6341 #endif
6342 #ifndef do_prctl_set_fp_mode
6343 #define do_prctl_set_fp_mode do_prctl_inval1
6344 #endif
6345 #ifndef do_prctl_sve_get_vl
6346 #define do_prctl_sve_get_vl do_prctl_inval0
6347 #endif
6348 #ifndef do_prctl_sve_set_vl
6349 #define do_prctl_sve_set_vl do_prctl_inval1
6350 #endif
6351 #ifndef do_prctl_reset_keys
6352 #define do_prctl_reset_keys do_prctl_inval1
6353 #endif
6354 #ifndef do_prctl_set_tagged_addr_ctrl
6355 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6356 #endif
6357 #ifndef do_prctl_get_tagged_addr_ctrl
6358 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6359 #endif
6360 #ifndef do_prctl_get_unalign
6361 #define do_prctl_get_unalign do_prctl_inval1
6362 #endif
6363 #ifndef do_prctl_set_unalign
6364 #define do_prctl_set_unalign do_prctl_inval1
6365 #endif
6366 #ifndef do_prctl_sme_get_vl
6367 #define do_prctl_sme_get_vl do_prctl_inval0
6368 #endif
6369 #ifndef do_prctl_sme_set_vl
6370 #define do_prctl_sme_set_vl do_prctl_inval1
6371 #endif
6372 
6373 static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
6374                          abi_long arg3, abi_long arg4, abi_long arg5)
6375 {
6376     abi_long ret;
6377 
6378     switch (option) {
6379     case PR_GET_PDEATHSIG:
6380         {
6381             int deathsig;
6382             ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
6383                                   arg3, arg4, arg5));
6384             if (!is_error(ret) &&
6385                 put_user_s32(host_to_target_signal(deathsig), arg2)) {
6386                 return -TARGET_EFAULT;
6387             }
6388             return ret;
6389         }
6390     case PR_SET_PDEATHSIG:
6391         return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
6392                                arg3, arg4, arg5));
6393     case PR_GET_NAME:
6394         {
6395             void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
6396             if (!name) {
6397                 return -TARGET_EFAULT;
6398             }
6399             ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
6400                                   arg3, arg4, arg5));
6401             unlock_user(name, arg2, 16);
6402             return ret;
6403         }
6404     case PR_SET_NAME:
6405         {
6406             void *name = lock_user(VERIFY_READ, arg2, 16, 1);
6407             if (!name) {
6408                 return -TARGET_EFAULT;
6409             }
6410             ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
6411                                   arg3, arg4, arg5));
6412             unlock_user(name, arg2, 0);
6413             return ret;
6414         }
6415     case PR_GET_FP_MODE:
6416         return do_prctl_get_fp_mode(env);
6417     case PR_SET_FP_MODE:
6418         return do_prctl_set_fp_mode(env, arg2);
6419     case PR_SVE_GET_VL:
6420         return do_prctl_sve_get_vl(env);
6421     case PR_SVE_SET_VL:
6422         return do_prctl_sve_set_vl(env, arg2);
6423     case PR_SME_GET_VL:
6424         return do_prctl_sme_get_vl(env);
6425     case PR_SME_SET_VL:
6426         return do_prctl_sme_set_vl(env, arg2);
6427     case PR_PAC_RESET_KEYS:
6428         if (arg3 || arg4 || arg5) {
6429             return -TARGET_EINVAL;
6430         }
6431         return do_prctl_reset_keys(env, arg2);
6432     case PR_SET_TAGGED_ADDR_CTRL:
6433         if (arg3 || arg4 || arg5) {
6434             return -TARGET_EINVAL;
6435         }
6436         return do_prctl_set_tagged_addr_ctrl(env, arg2);
6437     case PR_GET_TAGGED_ADDR_CTRL:
6438         if (arg2 || arg3 || arg4 || arg5) {
6439             return -TARGET_EINVAL;
6440         }
6441         return do_prctl_get_tagged_addr_ctrl(env);
6442 
6443     case PR_GET_UNALIGN:
6444         return do_prctl_get_unalign(env, arg2);
6445     case PR_SET_UNALIGN:
6446         return do_prctl_set_unalign(env, arg2);
6447 
6448     case PR_CAP_AMBIENT:
6449     case PR_CAPBSET_READ:
6450     case PR_CAPBSET_DROP:
6451     case PR_GET_DUMPABLE:
6452     case PR_SET_DUMPABLE:
6453     case PR_GET_KEEPCAPS:
6454     case PR_SET_KEEPCAPS:
6455     case PR_GET_SECUREBITS:
6456     case PR_SET_SECUREBITS:
6457     case PR_GET_TIMING:
6458     case PR_SET_TIMING:
6459     case PR_GET_TIMERSLACK:
6460     case PR_SET_TIMERSLACK:
6461     case PR_MCE_KILL:
6462     case PR_MCE_KILL_GET:
6463     case PR_GET_NO_NEW_PRIVS:
6464     case PR_SET_NO_NEW_PRIVS:
6465     case PR_GET_IO_FLUSHER:
6466     case PR_SET_IO_FLUSHER:
6467     case PR_SET_CHILD_SUBREAPER:
6468     case PR_GET_SPECULATION_CTRL:
6469     case PR_SET_SPECULATION_CTRL:
6470         /* Some prctl options have no pointer arguments and we can pass on. */
6471         return get_errno(prctl(option, arg2, arg3, arg4, arg5));
6472 
6473     case PR_GET_CHILD_SUBREAPER:
6474         {
6475             int val;
6476             ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
6477                                   arg3, arg4, arg5));
6478             if (!is_error(ret) && put_user_s32(val, arg2)) {
6479                 return -TARGET_EFAULT;
6480             }
6481             return ret;
6482         }
6483 
6484     case PR_GET_TID_ADDRESS:
6485         {
6486             TaskState *ts = get_task_state(env_cpu(env));
6487             return put_user_ual(ts->child_tidptr, arg2);
6488         }
6489 
6490     case PR_GET_FPEXC:
6491     case PR_SET_FPEXC:
6492         /* Was used for SPE on PowerPC. */
6493         return -TARGET_EINVAL;
6494 
6495     case PR_GET_ENDIAN:
6496     case PR_SET_ENDIAN:
6497     case PR_GET_FPEMU:
6498     case PR_SET_FPEMU:
6499     case PR_SET_MM:
6500     case PR_GET_SECCOMP:
6501     case PR_SET_SECCOMP:
6502     case PR_SET_SYSCALL_USER_DISPATCH:
6503     case PR_GET_THP_DISABLE:
6504     case PR_SET_THP_DISABLE:
6505     case PR_GET_TSC:
6506     case PR_SET_TSC:
6507         /* Disable to prevent the target disabling stuff we need. */
6508         return -TARGET_EINVAL;
6509 
6510     default:
6511         qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
6512                       option);
6513         return -TARGET_EINVAL;
6514     }
6515 }
6516 
6517 #define NEW_STACK_SIZE 0x40000
6518 
6519 
6520 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
6521 typedef struct {
6522     CPUArchState *env;
6523     pthread_mutex_t mutex;
6524     pthread_cond_t cond;
6525     pthread_t thread;
6526     uint32_t tid;
6527     abi_ulong child_tidptr;
6528     abi_ulong parent_tidptr;
6529     sigset_t sigmask;
6530 } new_thread_info;
6531 
6532 static void *clone_func(void *arg)
6533 {
6534     new_thread_info *info = arg;
6535     CPUArchState *env;
6536     CPUState *cpu;
6537     TaskState *ts;
6538 
6539     rcu_register_thread();
6540     tcg_register_thread();
6541     env = info->env;
6542     cpu = env_cpu(env);
6543     thread_cpu = cpu;
6544     ts = get_task_state(cpu);
6545     info->tid = sys_gettid();
6546     task_settid(ts);
6547     if (info->child_tidptr)
6548         put_user_u32(info->tid, info->child_tidptr);
6549     if (info->parent_tidptr)
6550         put_user_u32(info->tid, info->parent_tidptr);
6551     qemu_guest_random_seed_thread_part2(cpu->random_seed);
6552     /* Enable signals.  */
6553     sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
6554     /* Signal to the parent that we're ready.  */
6555     pthread_mutex_lock(&info->mutex);
6556     pthread_cond_broadcast(&info->cond);
6557     pthread_mutex_unlock(&info->mutex);
6558     /* Wait until the parent has finished initializing the tls state.  */
6559     pthread_mutex_lock(&clone_lock);
6560     pthread_mutex_unlock(&clone_lock);
6561     cpu_loop(env);
6562     /* never exits */
6563     return NULL;
6564 }
6565 
6566 /* do_fork() Must return host values and target errnos (unlike most
6567    do_*() functions). */
6568 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6569                    abi_ulong parent_tidptr, target_ulong newtls,
6570                    abi_ulong child_tidptr)
6571 {
6572     CPUState *cpu = env_cpu(env);
6573     int ret;
6574     TaskState *ts;
6575     CPUState *new_cpu;
6576     CPUArchState *new_env;
6577     sigset_t sigmask;
6578 
6579     flags &= ~CLONE_IGNORED_FLAGS;
6580 
6581     /* Emulate vfork() with fork() */
6582     if (flags & CLONE_VFORK)
6583         flags &= ~(CLONE_VFORK | CLONE_VM);
6584 
6585     if (flags & CLONE_VM) {
6586         TaskState *parent_ts = get_task_state(cpu);
6587         new_thread_info info;
6588         pthread_attr_t attr;
6589 
6590         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6591             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6592             return -TARGET_EINVAL;
6593         }
6594 
6595         ts = g_new0(TaskState, 1);
6596         init_task_state(ts);
6597 
6598         /* Grab a mutex so that thread setup appears atomic.  */
6599         pthread_mutex_lock(&clone_lock);
6600 
6601         /*
6602          * If this is our first additional thread, we need to ensure we
6603          * generate code for parallel execution and flush old translations.
6604          * Do this now so that the copy gets CF_PARALLEL too.
6605          */
6606         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6607             tcg_cflags_set(cpu, CF_PARALLEL);
6608             tb_flush(cpu);
6609         }
6610 
6611         /* we create a new CPU instance. */
6612         new_env = cpu_copy(env);
6613         /* Init regs that differ from the parent.  */
6614         cpu_clone_regs_child(new_env, newsp, flags);
6615         cpu_clone_regs_parent(env, flags);
6616         new_cpu = env_cpu(new_env);
6617         new_cpu->opaque = ts;
6618         ts->bprm = parent_ts->bprm;
6619         ts->info = parent_ts->info;
6620         ts->signal_mask = parent_ts->signal_mask;
6621 
6622         if (flags & CLONE_CHILD_CLEARTID) {
6623             ts->child_tidptr = child_tidptr;
6624         }
6625 
6626         if (flags & CLONE_SETTLS) {
6627             cpu_set_tls (new_env, newtls);
6628         }
6629 
6630         memset(&info, 0, sizeof(info));
6631         pthread_mutex_init(&info.mutex, NULL);
6632         pthread_mutex_lock(&info.mutex);
6633         pthread_cond_init(&info.cond, NULL);
6634         info.env = new_env;
6635         if (flags & CLONE_CHILD_SETTID) {
6636             info.child_tidptr = child_tidptr;
6637         }
6638         if (flags & CLONE_PARENT_SETTID) {
6639             info.parent_tidptr = parent_tidptr;
6640         }
6641 
6642         ret = pthread_attr_init(&attr);
6643         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6644         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6645         /* It is not safe to deliver signals until the child has finished
6646            initializing, so temporarily block all signals.  */
6647         sigfillset(&sigmask);
6648         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6649         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6650 
6651         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6652         /* TODO: Free new CPU state if thread creation failed.  */
6653 
6654         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6655         pthread_attr_destroy(&attr);
6656         if (ret == 0) {
6657             /* Wait for the child to initialize.  */
6658             pthread_cond_wait(&info.cond, &info.mutex);
6659             ret = info.tid;
6660         } else {
6661             ret = -1;
6662         }
6663         pthread_mutex_unlock(&info.mutex);
6664         pthread_cond_destroy(&info.cond);
6665         pthread_mutex_destroy(&info.mutex);
6666         pthread_mutex_unlock(&clone_lock);
6667     } else {
6668         /* if no CLONE_VM, we consider it is a fork */
6669         if (flags & CLONE_INVALID_FORK_FLAGS) {
6670             return -TARGET_EINVAL;
6671         }
6672 
6673         /* We can't support custom termination signals */
6674         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6675             return -TARGET_EINVAL;
6676         }
6677 
6678 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6679         if (flags & CLONE_PIDFD) {
6680             return -TARGET_EINVAL;
6681         }
6682 #endif
6683 
6684         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6685         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6686             return -TARGET_EINVAL;
6687         }
6688 
6689         if (block_signals()) {
6690             return -QEMU_ERESTARTSYS;
6691         }
6692 
6693         fork_start();
6694         ret = fork();
6695         if (ret == 0) {
6696             /* Child Process.  */
6697             cpu_clone_regs_child(env, newsp, flags);
6698             fork_end(ret);
6699             /* There is a race condition here.  The parent process could
6700                theoretically read the TID in the child process before the child
6701                tid is set.  This would require using either ptrace
6702                (not implemented) or having *_tidptr to point at a shared memory
6703                mapping.  We can't repeat the spinlock hack used above because
6704                the child process gets its own copy of the lock.  */
6705             if (flags & CLONE_CHILD_SETTID)
6706                 put_user_u32(sys_gettid(), child_tidptr);
6707             if (flags & CLONE_PARENT_SETTID)
6708                 put_user_u32(sys_gettid(), parent_tidptr);
6709             ts = get_task_state(cpu);
6710             if (flags & CLONE_SETTLS)
6711                 cpu_set_tls (env, newtls);
6712             if (flags & CLONE_CHILD_CLEARTID)
6713                 ts->child_tidptr = child_tidptr;
6714         } else {
6715             cpu_clone_regs_parent(env, flags);
6716             if (flags & CLONE_PIDFD) {
6717                 int pid_fd = 0;
6718 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6719                 int pid_child = ret;
6720                 pid_fd = pidfd_open(pid_child, 0);
6721                 if (pid_fd >= 0) {
6722                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6723                                                | FD_CLOEXEC);
6724                 } else {
6725                         pid_fd = 0;
6726                 }
6727 #endif
6728                 put_user_u32(pid_fd, parent_tidptr);
6729             }
6730             fork_end(ret);
6731         }
6732         g_assert(!cpu_in_exclusive_context(cpu));
6733     }
6734     return ret;
6735 }
6736 
6737 /* warning : doesn't handle linux specific flags... */
6738 static int target_to_host_fcntl_cmd(int cmd)
6739 {
6740     int ret;
6741 
6742     switch(cmd) {
6743     case TARGET_F_DUPFD:
6744     case TARGET_F_GETFD:
6745     case TARGET_F_SETFD:
6746     case TARGET_F_GETFL:
6747     case TARGET_F_SETFL:
6748     case TARGET_F_OFD_GETLK:
6749     case TARGET_F_OFD_SETLK:
6750     case TARGET_F_OFD_SETLKW:
6751         ret = cmd;
6752         break;
6753     case TARGET_F_GETLK:
6754         ret = F_GETLK;
6755         break;
6756     case TARGET_F_SETLK:
6757         ret = F_SETLK;
6758         break;
6759     case TARGET_F_SETLKW:
6760         ret = F_SETLKW;
6761         break;
6762     case TARGET_F_GETOWN:
6763         ret = F_GETOWN;
6764         break;
6765     case TARGET_F_SETOWN:
6766         ret = F_SETOWN;
6767         break;
6768     case TARGET_F_GETSIG:
6769         ret = F_GETSIG;
6770         break;
6771     case TARGET_F_SETSIG:
6772         ret = F_SETSIG;
6773         break;
6774 #if TARGET_ABI_BITS == 32
6775     case TARGET_F_GETLK64:
6776         ret = F_GETLK;
6777         break;
6778     case TARGET_F_SETLK64:
6779         ret = F_SETLK;
6780         break;
6781     case TARGET_F_SETLKW64:
6782         ret = F_SETLKW;
6783         break;
6784 #endif
6785     case TARGET_F_SETLEASE:
6786         ret = F_SETLEASE;
6787         break;
6788     case TARGET_F_GETLEASE:
6789         ret = F_GETLEASE;
6790         break;
6791 #ifdef F_DUPFD_CLOEXEC
6792     case TARGET_F_DUPFD_CLOEXEC:
6793         ret = F_DUPFD_CLOEXEC;
6794         break;
6795 #endif
6796     case TARGET_F_NOTIFY:
6797         ret = F_NOTIFY;
6798         break;
6799 #ifdef F_GETOWN_EX
6800     case TARGET_F_GETOWN_EX:
6801         ret = F_GETOWN_EX;
6802         break;
6803 #endif
6804 #ifdef F_SETOWN_EX
6805     case TARGET_F_SETOWN_EX:
6806         ret = F_SETOWN_EX;
6807         break;
6808 #endif
6809 #ifdef F_SETPIPE_SZ
6810     case TARGET_F_SETPIPE_SZ:
6811         ret = F_SETPIPE_SZ;
6812         break;
6813     case TARGET_F_GETPIPE_SZ:
6814         ret = F_GETPIPE_SZ;
6815         break;
6816 #endif
6817 #ifdef F_ADD_SEALS
6818     case TARGET_F_ADD_SEALS:
6819         ret = F_ADD_SEALS;
6820         break;
6821     case TARGET_F_GET_SEALS:
6822         ret = F_GET_SEALS;
6823         break;
6824 #endif
6825     default:
6826         ret = -TARGET_EINVAL;
6827         break;
6828     }
6829 
6830 #if defined(__powerpc64__)
6831     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6832      * is not supported by kernel. The glibc fcntl call actually adjusts
6833      * them to 5, 6 and 7 before making the syscall(). Since we make the
6834      * syscall directly, adjust to what is supported by the kernel.
6835      */
6836     if (ret >= F_GETLK && ret <= F_SETLKW) {
6837         ret -= F_GETLK - 5;
6838     }
6839 #endif
6840 
6841     return ret;
6842 }
6843 
6844 #define FLOCK_TRANSTBL \
6845     switch (type) { \
6846     TRANSTBL_CONVERT(F_RDLCK); \
6847     TRANSTBL_CONVERT(F_WRLCK); \
6848     TRANSTBL_CONVERT(F_UNLCK); \
6849     }
6850 
6851 static int target_to_host_flock(int type)
6852 {
6853 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6854     FLOCK_TRANSTBL
6855 #undef  TRANSTBL_CONVERT
6856     return -TARGET_EINVAL;
6857 }
6858 
6859 static int host_to_target_flock(int type)
6860 {
6861 #define TRANSTBL_CONVERT(a) case a: return TARGET_##a
6862     FLOCK_TRANSTBL
6863 #undef  TRANSTBL_CONVERT
6864     /* if we don't know how to convert the value coming
6865      * from the host we copy to the target field as-is
6866      */
6867     return type;
6868 }
6869 
6870 static inline abi_long copy_from_user_flock(struct flock *fl,
6871                                             abi_ulong target_flock_addr)
6872 {
6873     struct target_flock *target_fl;
6874     int l_type;
6875 
6876     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6877         return -TARGET_EFAULT;
6878     }
6879 
6880     __get_user(l_type, &target_fl->l_type);
6881     l_type = target_to_host_flock(l_type);
6882     if (l_type < 0) {
6883         return l_type;
6884     }
6885     fl->l_type = l_type;
6886     __get_user(fl->l_whence, &target_fl->l_whence);
6887     __get_user(fl->l_start, &target_fl->l_start);
6888     __get_user(fl->l_len, &target_fl->l_len);
6889     __get_user(fl->l_pid, &target_fl->l_pid);
6890     unlock_user_struct(target_fl, target_flock_addr, 0);
6891     return 0;
6892 }
6893 
6894 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6895                                           const struct flock *fl)
6896 {
6897     struct target_flock *target_fl;
6898     short l_type;
6899 
6900     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6901         return -TARGET_EFAULT;
6902     }
6903 
6904     l_type = host_to_target_flock(fl->l_type);
6905     __put_user(l_type, &target_fl->l_type);
6906     __put_user(fl->l_whence, &target_fl->l_whence);
6907     __put_user(fl->l_start, &target_fl->l_start);
6908     __put_user(fl->l_len, &target_fl->l_len);
6909     __put_user(fl->l_pid, &target_fl->l_pid);
6910     unlock_user_struct(target_fl, target_flock_addr, 1);
6911     return 0;
6912 }
6913 
6914 typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
6915 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6916 
6917 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
6918 struct target_oabi_flock64 {
6919     abi_short l_type;
6920     abi_short l_whence;
6921     abi_llong l_start;
6922     abi_llong l_len;
6923     abi_int   l_pid;
6924 } QEMU_PACKED;
6925 
6926 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6927                                                    abi_ulong target_flock_addr)
6928 {
6929     struct target_oabi_flock64 *target_fl;
6930     int l_type;
6931 
6932     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6933         return -TARGET_EFAULT;
6934     }
6935 
6936     __get_user(l_type, &target_fl->l_type);
6937     l_type = target_to_host_flock(l_type);
6938     if (l_type < 0) {
6939         return l_type;
6940     }
6941     fl->l_type = l_type;
6942     __get_user(fl->l_whence, &target_fl->l_whence);
6943     __get_user(fl->l_start, &target_fl->l_start);
6944     __get_user(fl->l_len, &target_fl->l_len);
6945     __get_user(fl->l_pid, &target_fl->l_pid);
6946     unlock_user_struct(target_fl, target_flock_addr, 0);
6947     return 0;
6948 }
6949 
6950 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6951                                                  const struct flock *fl)
6952 {
6953     struct target_oabi_flock64 *target_fl;
6954     short l_type;
6955 
6956     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6957         return -TARGET_EFAULT;
6958     }
6959 
6960     l_type = host_to_target_flock(fl->l_type);
6961     __put_user(l_type, &target_fl->l_type);
6962     __put_user(fl->l_whence, &target_fl->l_whence);
6963     __put_user(fl->l_start, &target_fl->l_start);
6964     __put_user(fl->l_len, &target_fl->l_len);
6965     __put_user(fl->l_pid, &target_fl->l_pid);
6966     unlock_user_struct(target_fl, target_flock_addr, 1);
6967     return 0;
6968 }
6969 #endif
6970 
6971 static inline abi_long copy_from_user_flock64(struct flock *fl,
6972                                               abi_ulong target_flock_addr)
6973 {
6974     struct target_flock64 *target_fl;
6975     int l_type;
6976 
6977     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6978         return -TARGET_EFAULT;
6979     }
6980 
6981     __get_user(l_type, &target_fl->l_type);
6982     l_type = target_to_host_flock(l_type);
6983     if (l_type < 0) {
6984         return l_type;
6985     }
6986     fl->l_type = l_type;
6987     __get_user(fl->l_whence, &target_fl->l_whence);
6988     __get_user(fl->l_start, &target_fl->l_start);
6989     __get_user(fl->l_len, &target_fl->l_len);
6990     __get_user(fl->l_pid, &target_fl->l_pid);
6991     unlock_user_struct(target_fl, target_flock_addr, 0);
6992     return 0;
6993 }
6994 
6995 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
6996                                             const struct flock *fl)
6997 {
6998     struct target_flock64 *target_fl;
6999     short l_type;
7000 
7001     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7002         return -TARGET_EFAULT;
7003     }
7004 
7005     l_type = host_to_target_flock(fl->l_type);
7006     __put_user(l_type, &target_fl->l_type);
7007     __put_user(fl->l_whence, &target_fl->l_whence);
7008     __put_user(fl->l_start, &target_fl->l_start);
7009     __put_user(fl->l_len, &target_fl->l_len);
7010     __put_user(fl->l_pid, &target_fl->l_pid);
7011     unlock_user_struct(target_fl, target_flock_addr, 1);
7012     return 0;
7013 }
7014 
7015 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7016 {
7017     struct flock fl;
7018 #ifdef F_GETOWN_EX
7019     struct f_owner_ex fox;
7020     struct target_f_owner_ex *target_fox;
7021 #endif
7022     abi_long ret;
7023     int host_cmd = target_to_host_fcntl_cmd(cmd);
7024 
7025     if (host_cmd == -TARGET_EINVAL)
7026 	    return host_cmd;
7027 
7028     switch(cmd) {
7029     case TARGET_F_GETLK:
7030         ret = copy_from_user_flock(&fl, arg);
7031         if (ret) {
7032             return ret;
7033         }
7034         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7035         if (ret == 0) {
7036             ret = copy_to_user_flock(arg, &fl);
7037         }
7038         break;
7039 
7040     case TARGET_F_SETLK:
7041     case TARGET_F_SETLKW:
7042         ret = copy_from_user_flock(&fl, arg);
7043         if (ret) {
7044             return ret;
7045         }
7046         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7047         break;
7048 
7049     case TARGET_F_GETLK64:
7050     case TARGET_F_OFD_GETLK:
7051         ret = copy_from_user_flock64(&fl, arg);
7052         if (ret) {
7053             return ret;
7054         }
7055         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7056         if (ret == 0) {
7057             ret = copy_to_user_flock64(arg, &fl);
7058         }
7059         break;
7060     case TARGET_F_SETLK64:
7061     case TARGET_F_SETLKW64:
7062     case TARGET_F_OFD_SETLK:
7063     case TARGET_F_OFD_SETLKW:
7064         ret = copy_from_user_flock64(&fl, arg);
7065         if (ret) {
7066             return ret;
7067         }
7068         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7069         break;
7070 
7071     case TARGET_F_GETFL:
7072         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7073         if (ret >= 0) {
7074             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7075             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7076             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7077                 ret |= TARGET_O_LARGEFILE;
7078             }
7079         }
7080         break;
7081 
7082     case TARGET_F_SETFL:
7083         ret = get_errno(safe_fcntl(fd, host_cmd,
7084                                    target_to_host_bitmask(arg,
7085                                                           fcntl_flags_tbl)));
7086         break;
7087 
7088 #ifdef F_GETOWN_EX
7089     case TARGET_F_GETOWN_EX:
7090         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7091         if (ret >= 0) {
7092             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7093                 return -TARGET_EFAULT;
7094             target_fox->type = tswap32(fox.type);
7095             target_fox->pid = tswap32(fox.pid);
7096             unlock_user_struct(target_fox, arg, 1);
7097         }
7098         break;
7099 #endif
7100 
7101 #ifdef F_SETOWN_EX
7102     case TARGET_F_SETOWN_EX:
7103         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7104             return -TARGET_EFAULT;
7105         fox.type = tswap32(target_fox->type);
7106         fox.pid = tswap32(target_fox->pid);
7107         unlock_user_struct(target_fox, arg, 0);
7108         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7109         break;
7110 #endif
7111 
7112     case TARGET_F_SETSIG:
7113         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7114         break;
7115 
7116     case TARGET_F_GETSIG:
7117         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7118         break;
7119 
7120     case TARGET_F_SETOWN:
7121     case TARGET_F_GETOWN:
7122     case TARGET_F_SETLEASE:
7123     case TARGET_F_GETLEASE:
7124     case TARGET_F_SETPIPE_SZ:
7125     case TARGET_F_GETPIPE_SZ:
7126     case TARGET_F_ADD_SEALS:
7127     case TARGET_F_GET_SEALS:
7128         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7129         break;
7130 
7131     default:
7132         ret = get_errno(safe_fcntl(fd, cmd, arg));
7133         break;
7134     }
7135     return ret;
7136 }
7137 
7138 #ifdef USE_UID16
7139 
7140 static inline int high2lowuid(int uid)
7141 {
7142     if (uid > 65535)
7143         return 65534;
7144     else
7145         return uid;
7146 }
7147 
7148 static inline int high2lowgid(int gid)
7149 {
7150     if (gid > 65535)
7151         return 65534;
7152     else
7153         return gid;
7154 }
7155 
7156 static inline int low2highuid(int uid)
7157 {
7158     if ((int16_t)uid == -1)
7159         return -1;
7160     else
7161         return uid;
7162 }
7163 
7164 static inline int low2highgid(int gid)
7165 {
7166     if ((int16_t)gid == -1)
7167         return -1;
7168     else
7169         return gid;
7170 }
7171 static inline int tswapid(int id)
7172 {
7173     return tswap16(id);
7174 }
7175 
7176 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7177 
7178 #else /* !USE_UID16 */
7179 static inline int high2lowuid(int uid)
7180 {
7181     return uid;
7182 }
7183 static inline int high2lowgid(int gid)
7184 {
7185     return gid;
7186 }
7187 static inline int low2highuid(int uid)
7188 {
7189     return uid;
7190 }
7191 static inline int low2highgid(int gid)
7192 {
7193     return gid;
7194 }
7195 static inline int tswapid(int id)
7196 {
7197     return tswap32(id);
7198 }
7199 
7200 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7201 
7202 #endif /* USE_UID16 */
7203 
7204 /* We must do direct syscalls for setting UID/GID, because we want to
7205  * implement the Linux system call semantics of "change only for this thread",
7206  * not the libc/POSIX semantics of "change for all threads in process".
7207  * (See http://ewontfix.com/17/ for more details.)
7208  * We use the 32-bit version of the syscalls if present; if it is not
7209  * then either the host architecture supports 32-bit UIDs natively with
7210  * the standard syscall, or the 16-bit UID is the best we can do.
7211  */
7212 #ifdef __NR_setuid32
7213 #define __NR_sys_setuid __NR_setuid32
7214 #else
7215 #define __NR_sys_setuid __NR_setuid
7216 #endif
7217 #ifdef __NR_setgid32
7218 #define __NR_sys_setgid __NR_setgid32
7219 #else
7220 #define __NR_sys_setgid __NR_setgid
7221 #endif
7222 #ifdef __NR_setresuid32
7223 #define __NR_sys_setresuid __NR_setresuid32
7224 #else
7225 #define __NR_sys_setresuid __NR_setresuid
7226 #endif
7227 #ifdef __NR_setresgid32
7228 #define __NR_sys_setresgid __NR_setresgid32
7229 #else
7230 #define __NR_sys_setresgid __NR_setresgid
7231 #endif
7232 #ifdef __NR_setgroups32
7233 #define __NR_sys_setgroups __NR_setgroups32
7234 #else
7235 #define __NR_sys_setgroups __NR_setgroups
7236 #endif
7237 
7238 _syscall1(int, sys_setuid, uid_t, uid)
7239 _syscall1(int, sys_setgid, gid_t, gid)
7240 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7241 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7242 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7243 
7244 void syscall_init(void)
7245 {
7246     IOCTLEntry *ie;
7247     const argtype *arg_type;
7248     int size;
7249 
7250     thunk_init(STRUCT_MAX);
7251 
7252 #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
7253 #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
7254 #include "syscall_types.h"
7255 #undef STRUCT
7256 #undef STRUCT_SPECIAL
7257 
7258     /* we patch the ioctl size if necessary. We rely on the fact that
7259        no ioctl has all the bits at '1' in the size field */
7260     ie = ioctl_entries;
7261     while (ie->target_cmd != 0) {
7262         if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
7263             TARGET_IOC_SIZEMASK) {
7264             arg_type = ie->arg_type;
7265             if (arg_type[0] != TYPE_PTR) {
7266                 fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
7267                         ie->target_cmd);
7268                 exit(1);
7269             }
7270             arg_type++;
7271             size = thunk_type_size(arg_type, 0);
7272             ie->target_cmd = (ie->target_cmd &
7273                               ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
7274                 (size << TARGET_IOC_SIZESHIFT);
7275         }
7276 
7277         /* automatic consistency check if same arch */
7278 #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
7279     (defined(__x86_64__) && defined(TARGET_X86_64))
7280         if (unlikely(ie->target_cmd != ie->host_cmd)) {
7281             fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
7282                     ie->name, ie->target_cmd, ie->host_cmd);
7283         }
7284 #endif
7285         ie++;
7286     }
7287 }
7288 
7289 #ifdef TARGET_NR_truncate64
7290 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7291                                          abi_long arg2,
7292                                          abi_long arg3,
7293                                          abi_long arg4)
7294 {
7295     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7296         arg2 = arg3;
7297         arg3 = arg4;
7298     }
7299     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7300 }
7301 #endif
7302 
7303 #ifdef TARGET_NR_ftruncate64
7304 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7305                                           abi_long arg2,
7306                                           abi_long arg3,
7307                                           abi_long arg4)
7308 {
7309     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7310         arg2 = arg3;
7311         arg3 = arg4;
7312     }
7313     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7314 }
7315 #endif
7316 
7317 #if defined(TARGET_NR_timer_settime) || \
7318     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7319 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7320                                                  abi_ulong target_addr)
7321 {
7322     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7323                                 offsetof(struct target_itimerspec,
7324                                          it_interval)) ||
7325         target_to_host_timespec(&host_its->it_value, target_addr +
7326                                 offsetof(struct target_itimerspec,
7327                                          it_value))) {
7328         return -TARGET_EFAULT;
7329     }
7330 
7331     return 0;
7332 }
7333 #endif
7334 
7335 #if defined(TARGET_NR_timer_settime64) || \
7336     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7337 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7338                                                    abi_ulong target_addr)
7339 {
7340     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7341                                   offsetof(struct target__kernel_itimerspec,
7342                                            it_interval)) ||
7343         target_to_host_timespec64(&host_its->it_value, target_addr +
7344                                   offsetof(struct target__kernel_itimerspec,
7345                                            it_value))) {
7346         return -TARGET_EFAULT;
7347     }
7348 
7349     return 0;
7350 }
7351 #endif
7352 
7353 #if ((defined(TARGET_NR_timerfd_gettime) || \
7354       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7355       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7356 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7357                                                  struct itimerspec *host_its)
7358 {
7359     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7360                                                        it_interval),
7361                                 &host_its->it_interval) ||
7362         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7363                                                        it_value),
7364                                 &host_its->it_value)) {
7365         return -TARGET_EFAULT;
7366     }
7367     return 0;
7368 }
7369 #endif
7370 
7371 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7372       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7373       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7374 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7375                                                    struct itimerspec *host_its)
7376 {
7377     if (host_to_target_timespec64(target_addr +
7378                                   offsetof(struct target__kernel_itimerspec,
7379                                            it_interval),
7380                                   &host_its->it_interval) ||
7381         host_to_target_timespec64(target_addr +
7382                                   offsetof(struct target__kernel_itimerspec,
7383                                            it_value),
7384                                   &host_its->it_value)) {
7385         return -TARGET_EFAULT;
7386     }
7387     return 0;
7388 }
7389 #endif
7390 
7391 #if defined(TARGET_NR_adjtimex) || \
7392     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
7393 static inline abi_long target_to_host_timex(struct timex *host_tx,
7394                                             abi_long target_addr)
7395 {
7396     struct target_timex *target_tx;
7397 
7398     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7399         return -TARGET_EFAULT;
7400     }
7401 
7402     __get_user(host_tx->modes, &target_tx->modes);
7403     __get_user(host_tx->offset, &target_tx->offset);
7404     __get_user(host_tx->freq, &target_tx->freq);
7405     __get_user(host_tx->maxerror, &target_tx->maxerror);
7406     __get_user(host_tx->esterror, &target_tx->esterror);
7407     __get_user(host_tx->status, &target_tx->status);
7408     __get_user(host_tx->constant, &target_tx->constant);
7409     __get_user(host_tx->precision, &target_tx->precision);
7410     __get_user(host_tx->tolerance, &target_tx->tolerance);
7411     __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7412     __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7413     __get_user(host_tx->tick, &target_tx->tick);
7414     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7415     __get_user(host_tx->jitter, &target_tx->jitter);
7416     __get_user(host_tx->shift, &target_tx->shift);
7417     __get_user(host_tx->stabil, &target_tx->stabil);
7418     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7419     __get_user(host_tx->calcnt, &target_tx->calcnt);
7420     __get_user(host_tx->errcnt, &target_tx->errcnt);
7421     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7422     __get_user(host_tx->tai, &target_tx->tai);
7423 
7424     unlock_user_struct(target_tx, target_addr, 0);
7425     return 0;
7426 }
7427 
7428 static inline abi_long host_to_target_timex(abi_long target_addr,
7429                                             struct timex *host_tx)
7430 {
7431     struct target_timex *target_tx;
7432 
7433     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7434         return -TARGET_EFAULT;
7435     }
7436 
7437     __put_user(host_tx->modes, &target_tx->modes);
7438     __put_user(host_tx->offset, &target_tx->offset);
7439     __put_user(host_tx->freq, &target_tx->freq);
7440     __put_user(host_tx->maxerror, &target_tx->maxerror);
7441     __put_user(host_tx->esterror, &target_tx->esterror);
7442     __put_user(host_tx->status, &target_tx->status);
7443     __put_user(host_tx->constant, &target_tx->constant);
7444     __put_user(host_tx->precision, &target_tx->precision);
7445     __put_user(host_tx->tolerance, &target_tx->tolerance);
7446     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7447     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7448     __put_user(host_tx->tick, &target_tx->tick);
7449     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7450     __put_user(host_tx->jitter, &target_tx->jitter);
7451     __put_user(host_tx->shift, &target_tx->shift);
7452     __put_user(host_tx->stabil, &target_tx->stabil);
7453     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7454     __put_user(host_tx->calcnt, &target_tx->calcnt);
7455     __put_user(host_tx->errcnt, &target_tx->errcnt);
7456     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7457     __put_user(host_tx->tai, &target_tx->tai);
7458 
7459     unlock_user_struct(target_tx, target_addr, 1);
7460     return 0;
7461 }
7462 #endif
7463 
7464 
7465 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
7466 static inline abi_long target_to_host_timex64(struct timex *host_tx,
7467                                               abi_long target_addr)
7468 {
7469     struct target__kernel_timex *target_tx;
7470 
7471     if (copy_from_user_timeval64(&host_tx->time, target_addr +
7472                                  offsetof(struct target__kernel_timex,
7473                                           time))) {
7474         return -TARGET_EFAULT;
7475     }
7476 
7477     if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
7478         return -TARGET_EFAULT;
7479     }
7480 
7481     __get_user(host_tx->modes, &target_tx->modes);
7482     __get_user(host_tx->offset, &target_tx->offset);
7483     __get_user(host_tx->freq, &target_tx->freq);
7484     __get_user(host_tx->maxerror, &target_tx->maxerror);
7485     __get_user(host_tx->esterror, &target_tx->esterror);
7486     __get_user(host_tx->status, &target_tx->status);
7487     __get_user(host_tx->constant, &target_tx->constant);
7488     __get_user(host_tx->precision, &target_tx->precision);
7489     __get_user(host_tx->tolerance, &target_tx->tolerance);
7490     __get_user(host_tx->tick, &target_tx->tick);
7491     __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7492     __get_user(host_tx->jitter, &target_tx->jitter);
7493     __get_user(host_tx->shift, &target_tx->shift);
7494     __get_user(host_tx->stabil, &target_tx->stabil);
7495     __get_user(host_tx->jitcnt, &target_tx->jitcnt);
7496     __get_user(host_tx->calcnt, &target_tx->calcnt);
7497     __get_user(host_tx->errcnt, &target_tx->errcnt);
7498     __get_user(host_tx->stbcnt, &target_tx->stbcnt);
7499     __get_user(host_tx->tai, &target_tx->tai);
7500 
7501     unlock_user_struct(target_tx, target_addr, 0);
7502     return 0;
7503 }
7504 
7505 static inline abi_long host_to_target_timex64(abi_long target_addr,
7506                                               struct timex *host_tx)
7507 {
7508     struct target__kernel_timex *target_tx;
7509 
7510    if (copy_to_user_timeval64(target_addr +
7511                               offsetof(struct target__kernel_timex, time),
7512                               &host_tx->time)) {
7513         return -TARGET_EFAULT;
7514     }
7515 
7516     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7517         return -TARGET_EFAULT;
7518     }
7519 
7520     __put_user(host_tx->modes, &target_tx->modes);
7521     __put_user(host_tx->offset, &target_tx->offset);
7522     __put_user(host_tx->freq, &target_tx->freq);
7523     __put_user(host_tx->maxerror, &target_tx->maxerror);
7524     __put_user(host_tx->esterror, &target_tx->esterror);
7525     __put_user(host_tx->status, &target_tx->status);
7526     __put_user(host_tx->constant, &target_tx->constant);
7527     __put_user(host_tx->precision, &target_tx->precision);
7528     __put_user(host_tx->tolerance, &target_tx->tolerance);
7529     __put_user(host_tx->tick, &target_tx->tick);
7530     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7531     __put_user(host_tx->jitter, &target_tx->jitter);
7532     __put_user(host_tx->shift, &target_tx->shift);
7533     __put_user(host_tx->stabil, &target_tx->stabil);
7534     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7535     __put_user(host_tx->calcnt, &target_tx->calcnt);
7536     __put_user(host_tx->errcnt, &target_tx->errcnt);
7537     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7538     __put_user(host_tx->tai, &target_tx->tai);
7539 
7540     unlock_user_struct(target_tx, target_addr, 1);
7541     return 0;
7542 }
7543 #endif
7544 
7545 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7546 #define sigev_notify_thread_id _sigev_un._tid
7547 #endif
7548 
7549 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7550                                                abi_ulong target_addr)
7551 {
7552     struct target_sigevent *target_sevp;
7553 
7554     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7555         return -TARGET_EFAULT;
7556     }
7557 
7558     /* This union is awkward on 64 bit systems because it has a 32 bit
7559      * integer and a pointer in it; we follow the conversion approach
7560      * used for handling sigval types in signal.c so the guest should get
7561      * the correct value back even if we did a 64 bit byteswap and it's
7562      * using the 32 bit integer.
7563      */
7564     host_sevp->sigev_value.sival_ptr =
7565         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7566     host_sevp->sigev_signo =
7567         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7568     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7569     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7570 
7571     unlock_user_struct(target_sevp, target_addr, 1);
7572     return 0;
7573 }
7574 
7575 #if defined(TARGET_NR_mlockall)
7576 static inline int target_to_host_mlockall_arg(int arg)
7577 {
7578     int result = 0;
7579 
7580     if (arg & TARGET_MCL_CURRENT) {
7581         result |= MCL_CURRENT;
7582     }
7583     if (arg & TARGET_MCL_FUTURE) {
7584         result |= MCL_FUTURE;
7585     }
7586 #ifdef MCL_ONFAULT
7587     if (arg & TARGET_MCL_ONFAULT) {
7588         result |= MCL_ONFAULT;
7589     }
7590 #endif
7591 
7592     return result;
7593 }
7594 #endif
7595 
7596 static inline int target_to_host_msync_arg(abi_long arg)
7597 {
7598     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7599            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7600            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7601            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7602 }
7603 
7604 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7605      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7606      defined(TARGET_NR_newfstatat))
7607 static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
7608                                              abi_ulong target_addr,
7609                                              struct stat *host_st)
7610 {
7611 #if defined(TARGET_ARM) && defined(TARGET_ABI32)
7612     if (cpu_env->eabi) {
7613         struct target_eabi_stat64 *target_st;
7614 
7615         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7616             return -TARGET_EFAULT;
7617         memset(target_st, 0, sizeof(struct target_eabi_stat64));
7618         __put_user(host_st->st_dev, &target_st->st_dev);
7619         __put_user(host_st->st_ino, &target_st->st_ino);
7620 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7621         __put_user(host_st->st_ino, &target_st->__st_ino);
7622 #endif
7623         __put_user(host_st->st_mode, &target_st->st_mode);
7624         __put_user(host_st->st_nlink, &target_st->st_nlink);
7625         __put_user(host_st->st_uid, &target_st->st_uid);
7626         __put_user(host_st->st_gid, &target_st->st_gid);
7627         __put_user(host_st->st_rdev, &target_st->st_rdev);
7628         __put_user(host_st->st_size, &target_st->st_size);
7629         __put_user(host_st->st_blksize, &target_st->st_blksize);
7630         __put_user(host_st->st_blocks, &target_st->st_blocks);
7631         __put_user(host_st->st_atime, &target_st->target_st_atime);
7632         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7633         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7634 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7635         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7636         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7637         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7638 #endif
7639         unlock_user_struct(target_st, target_addr, 1);
7640     } else
7641 #endif
7642     {
7643 #if defined(TARGET_HAS_STRUCT_STAT64)
7644         struct target_stat64 *target_st;
7645 #else
7646         struct target_stat *target_st;
7647 #endif
7648 
7649         if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
7650             return -TARGET_EFAULT;
7651         memset(target_st, 0, sizeof(*target_st));
7652         __put_user(host_st->st_dev, &target_st->st_dev);
7653         __put_user(host_st->st_ino, &target_st->st_ino);
7654 #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
7655         __put_user(host_st->st_ino, &target_st->__st_ino);
7656 #endif
7657         __put_user(host_st->st_mode, &target_st->st_mode);
7658         __put_user(host_st->st_nlink, &target_st->st_nlink);
7659         __put_user(host_st->st_uid, &target_st->st_uid);
7660         __put_user(host_st->st_gid, &target_st->st_gid);
7661         __put_user(host_st->st_rdev, &target_st->st_rdev);
7662         /* XXX: better use of kernel struct */
7663         __put_user(host_st->st_size, &target_st->st_size);
7664         __put_user(host_st->st_blksize, &target_st->st_blksize);
7665         __put_user(host_st->st_blocks, &target_st->st_blocks);
7666         __put_user(host_st->st_atime, &target_st->target_st_atime);
7667         __put_user(host_st->st_mtime, &target_st->target_st_mtime);
7668         __put_user(host_st->st_ctime, &target_st->target_st_ctime);
7669 #ifdef HAVE_STRUCT_STAT_ST_ATIM
7670         __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
7671         __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
7672         __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
7673 #endif
7674         unlock_user_struct(target_st, target_addr, 1);
7675     }
7676 
7677     return 0;
7678 }
7679 #endif
7680 
7681 #if defined(TARGET_NR_statx) && defined(__NR_statx)
7682 static inline abi_long host_to_target_statx(struct target_statx *host_stx,
7683                                             abi_ulong target_addr)
7684 {
7685     struct target_statx *target_stx;
7686 
7687     if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
7688         return -TARGET_EFAULT;
7689     }
7690     memset(target_stx, 0, sizeof(*target_stx));
7691 
7692     __put_user(host_stx->stx_mask, &target_stx->stx_mask);
7693     __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
7694     __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
7695     __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
7696     __put_user(host_stx->stx_uid, &target_stx->stx_uid);
7697     __put_user(host_stx->stx_gid, &target_stx->stx_gid);
7698     __put_user(host_stx->stx_mode, &target_stx->stx_mode);
7699     __put_user(host_stx->stx_ino, &target_stx->stx_ino);
7700     __put_user(host_stx->stx_size, &target_stx->stx_size);
7701     __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
7702     __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
7703     __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
7704     __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
7705     __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
7706     __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
7707     __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
7708     __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
7709     __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
7710     __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
7711     __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
7712     __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
7713     __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
7714     __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);
7715 
7716     unlock_user_struct(target_stx, target_addr, 1);
7717 
7718     return 0;
7719 }
7720 #endif
7721 
7722 static int do_sys_futex(int *uaddr, int op, int val,
7723                          const struct timespec *timeout, int *uaddr2,
7724                          int val3)
7725 {
7726 #if HOST_LONG_BITS == 64
7727 #if defined(__NR_futex)
7728     /* always a 64-bit time_t, it doesn't define _time64 version  */
7729     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7730 
7731 #endif
7732 #else /* HOST_LONG_BITS == 64 */
7733 #if defined(__NR_futex_time64)
7734     if (sizeof(timeout->tv_sec) == 8) {
7735         /* _time64 function on 32bit arch */
7736         return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
7737     }
7738 #endif
7739 #if defined(__NR_futex)
7740     /* old function on 32bit arch */
7741     return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
7742 #endif
7743 #endif /* HOST_LONG_BITS == 64 */
7744     g_assert_not_reached();
7745 }
7746 
7747 static int do_safe_futex(int *uaddr, int op, int val,
7748                          const struct timespec *timeout, int *uaddr2,
7749                          int val3)
7750 {
7751 #if HOST_LONG_BITS == 64
7752 #if defined(__NR_futex)
7753     /* always a 64-bit time_t, it doesn't define _time64 version  */
7754     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7755 #endif
7756 #else /* HOST_LONG_BITS == 64 */
7757 #if defined(__NR_futex_time64)
7758     if (sizeof(timeout->tv_sec) == 8) {
7759         /* _time64 function on 32bit arch */
7760         return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
7761                                            val3));
7762     }
7763 #endif
7764 #if defined(__NR_futex)
7765     /* old function on 32bit arch */
7766     return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
7767 #endif
7768 #endif /* HOST_LONG_BITS == 64 */
7769     return -TARGET_ENOSYS;
7770 }
7771 
7772 /* ??? Using host futex calls even when target atomic operations
7773    are not really atomic probably breaks things.  However implementing
7774    futexes locally would make futexes shared between multiple processes
7775    tricky.  However they're probably useless because guest atomic
7776    operations won't work either.  */
7777 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
7778 static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
7779                     int op, int val, target_ulong timeout,
7780                     target_ulong uaddr2, int val3)
7781 {
7782     struct timespec ts, *pts = NULL;
7783     void *haddr2 = NULL;
7784     int base_op;
7785 
7786     /* We assume FUTEX_* constants are the same on both host and target. */
7787 #ifdef FUTEX_CMD_MASK
7788     base_op = op & FUTEX_CMD_MASK;
7789 #else
7790     base_op = op;
7791 #endif
7792     switch (base_op) {
7793     case FUTEX_WAIT:
7794     case FUTEX_WAIT_BITSET:
7795         val = tswap32(val);
7796         break;
7797     case FUTEX_WAIT_REQUEUE_PI:
7798         val = tswap32(val);
7799         haddr2 = g2h(cpu, uaddr2);
7800         break;
7801     case FUTEX_LOCK_PI:
7802     case FUTEX_LOCK_PI2:
7803         break;
7804     case FUTEX_WAKE:
7805     case FUTEX_WAKE_BITSET:
7806     case FUTEX_TRYLOCK_PI:
7807     case FUTEX_UNLOCK_PI:
7808         timeout = 0;
7809         break;
7810     case FUTEX_FD:
7811         val = target_to_host_signal(val);
7812         timeout = 0;
7813         break;
7814     case FUTEX_CMP_REQUEUE:
7815     case FUTEX_CMP_REQUEUE_PI:
7816         val3 = tswap32(val3);
7817         /* fall through */
7818     case FUTEX_REQUEUE:
7819     case FUTEX_WAKE_OP:
7820         /*
7821          * For these, the 4th argument is not TIMEOUT, but VAL2.
7822          * But the prototype of do_safe_futex takes a pointer, so
7823          * insert casts to satisfy the compiler.  We do not need
7824          * to tswap VAL2 since it's not compared to guest memory.
7825           */
7826         pts = (struct timespec *)(uintptr_t)timeout;
7827         timeout = 0;
7828         haddr2 = g2h(cpu, uaddr2);
7829         break;
7830     default:
7831         return -TARGET_ENOSYS;
7832     }
7833     if (timeout) {
7834         pts = &ts;
7835         if (time64
7836             ? target_to_host_timespec64(pts, timeout)
7837             : target_to_host_timespec(pts, timeout)) {
7838             return -TARGET_EFAULT;
7839         }
7840     }
7841     return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
7842 }
7843 #endif
7844 
7845 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7846 static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
7847                                      abi_long handle, abi_long mount_id,
7848                                      abi_long flags)
7849 {
7850     struct file_handle *target_fh;
7851     struct file_handle *fh;
7852     int mid = 0;
7853     abi_long ret;
7854     char *name;
7855     unsigned int size, total_size;
7856 
7857     if (get_user_s32(size, handle)) {
7858         return -TARGET_EFAULT;
7859     }
7860 
7861     name = lock_user_string(pathname);
7862     if (!name) {
7863         return -TARGET_EFAULT;
7864     }
7865 
7866     total_size = sizeof(struct file_handle) + size;
7867     target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
7868     if (!target_fh) {
7869         unlock_user(name, pathname, 0);
7870         return -TARGET_EFAULT;
7871     }
7872 
7873     fh = g_malloc0(total_size);
7874     fh->handle_bytes = size;
7875 
7876     ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
7877     unlock_user(name, pathname, 0);
7878 
7879     /* man name_to_handle_at(2):
7880      * Other than the use of the handle_bytes field, the caller should treat
7881      * the file_handle structure as an opaque data type
7882      */
7883 
7884     memcpy(target_fh, fh, total_size);
7885     target_fh->handle_bytes = tswap32(fh->handle_bytes);
7886     target_fh->handle_type = tswap32(fh->handle_type);
7887     g_free(fh);
7888     unlock_user(target_fh, handle, total_size);
7889 
7890     if (put_user_s32(mid, mount_id)) {
7891         return -TARGET_EFAULT;
7892     }
7893 
7894     return ret;
7895 
7896 }
7897 #endif
7898 
7899 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
7900 static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
7901                                      abi_long flags)
7902 {
7903     struct file_handle *target_fh;
7904     struct file_handle *fh;
7905     unsigned int size, total_size;
7906     abi_long ret;
7907 
7908     if (get_user_s32(size, handle)) {
7909         return -TARGET_EFAULT;
7910     }
7911 
7912     total_size = sizeof(struct file_handle) + size;
7913     target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
7914     if (!target_fh) {
7915         return -TARGET_EFAULT;
7916     }
7917 
7918     fh = g_memdup(target_fh, total_size);
7919     fh->handle_bytes = size;
7920     fh->handle_type = tswap32(target_fh->handle_type);
7921 
7922     ret = get_errno(open_by_handle_at(mount_fd, fh,
7923                     target_to_host_bitmask(flags, fcntl_flags_tbl)));
7924 
7925     g_free(fh);
7926 
7927     unlock_user(target_fh, handle, total_size);
7928 
7929     return ret;
7930 }
7931 #endif
7932 
7933 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7934 
7935 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7936 {
7937     int host_flags;
7938     target_sigset_t *target_mask;
7939     sigset_t host_mask;
7940     abi_long ret;
7941 
7942     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7943         return -TARGET_EINVAL;
7944     }
7945     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7946         return -TARGET_EFAULT;
7947     }
7948 
7949     target_to_host_sigset(&host_mask, target_mask);
7950 
7951     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7952 
7953     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7954     if (ret >= 0) {
7955         fd_trans_register(ret, &target_signalfd_trans);
7956     }
7957 
7958     unlock_user_struct(target_mask, mask, 0);
7959 
7960     return ret;
7961 }
7962 #endif
7963 
7964 /* Map host to target signal numbers for the wait family of syscalls.
7965    Assume all other status bits are the same.  */
7966 int host_to_target_waitstatus(int status)
7967 {
7968     if (WIFSIGNALED(status)) {
7969         return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
7970     }
7971     if (WIFSTOPPED(status)) {
7972         return (host_to_target_signal(WSTOPSIG(status)) << 8)
7973                | (status & 0xff);
7974     }
7975     return status;
7976 }
7977 
7978 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
7979 {
7980     CPUState *cpu = env_cpu(cpu_env);
7981     struct linux_binprm *bprm = get_task_state(cpu)->bprm;
7982     int i;
7983 
7984     for (i = 0; i < bprm->argc; i++) {
7985         size_t len = strlen(bprm->argv[i]) + 1;
7986 
7987         if (write(fd, bprm->argv[i], len) != len) {
7988             return -1;
7989         }
7990     }
7991 
7992     return 0;
7993 }
7994 
7995 struct open_self_maps_data {
7996     TaskState *ts;
7997     IntervalTreeRoot *host_maps;
7998     int fd;
7999     bool smaps;
8000 };
8001 
8002 /*
8003  * Subroutine to output one line of /proc/self/maps,
8004  * or one region of /proc/self/smaps.
8005  */
8006 
8007 #ifdef TARGET_HPPA
8008 # define test_stack(S, E, L)  (E == L)
8009 #else
8010 # define test_stack(S, E, L)  (S == L)
8011 #endif
8012 
8013 static void open_self_maps_4(const struct open_self_maps_data *d,
8014                              const MapInfo *mi, abi_ptr start,
8015                              abi_ptr end, unsigned flags)
8016 {
8017     const struct image_info *info = d->ts->info;
8018     const char *path = mi->path;
8019     uint64_t offset;
8020     int fd = d->fd;
8021     int count;
8022 
8023     if (test_stack(start, end, info->stack_limit)) {
8024         path = "[stack]";
8025     } else if (start == info->brk) {
8026         path = "[heap]";
8027     } else if (start == info->vdso) {
8028         path = "[vdso]";
8029 #ifdef TARGET_X86_64
8030     } else if (start == TARGET_VSYSCALL_PAGE) {
8031         path = "[vsyscall]";
8032 #endif
8033     }
8034 
8035     /* Except null device (MAP_ANON), adjust offset for this fragment. */
8036     offset = mi->offset;
8037     if (mi->dev) {
8038         uintptr_t hstart = (uintptr_t)g2h_untagged(start);
8039         offset += hstart - mi->itree.start;
8040     }
8041 
8042     count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
8043                     " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
8044                     start, end,
8045                     (flags & PAGE_READ) ? 'r' : '-',
8046                     (flags & PAGE_WRITE_ORG) ? 'w' : '-',
8047                     (flags & PAGE_EXEC) ? 'x' : '-',
8048                     mi->is_priv ? 'p' : 's',
8049                     offset, major(mi->dev), minor(mi->dev),
8050                     (uint64_t)mi->inode);
8051     if (path) {
8052         dprintf(fd, "%*s%s\n", 73 - count, "", path);
8053     } else {
8054         dprintf(fd, "\n");
8055     }
8056 
8057     if (d->smaps) {
8058         unsigned long size = end - start;
8059         unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8060         unsigned long size_kb = size >> 10;
8061 
8062         dprintf(fd, "Size:                  %lu kB\n"
8063                 "KernelPageSize:        %lu kB\n"
8064                 "MMUPageSize:           %lu kB\n"
8065                 "Rss:                   0 kB\n"
8066                 "Pss:                   0 kB\n"
8067                 "Pss_Dirty:             0 kB\n"
8068                 "Shared_Clean:          0 kB\n"
8069                 "Shared_Dirty:          0 kB\n"
8070                 "Private_Clean:         0 kB\n"
8071                 "Private_Dirty:         0 kB\n"
8072                 "Referenced:            0 kB\n"
8073                 "Anonymous:             %lu kB\n"
8074                 "LazyFree:              0 kB\n"
8075                 "AnonHugePages:         0 kB\n"
8076                 "ShmemPmdMapped:        0 kB\n"
8077                 "FilePmdMapped:         0 kB\n"
8078                 "Shared_Hugetlb:        0 kB\n"
8079                 "Private_Hugetlb:       0 kB\n"
8080                 "Swap:                  0 kB\n"
8081                 "SwapPss:               0 kB\n"
8082                 "Locked:                0 kB\n"
8083                 "THPeligible:    0\n"
8084                 "VmFlags:%s%s%s%s%s%s%s%s\n",
8085                 size_kb, page_size_kb, page_size_kb,
8086                 (flags & PAGE_ANON ? size_kb : 0),
8087                 (flags & PAGE_READ) ? " rd" : "",
8088                 (flags & PAGE_WRITE_ORG) ? " wr" : "",
8089                 (flags & PAGE_EXEC) ? " ex" : "",
8090                 mi->is_priv ? "" : " sh",
8091                 (flags & PAGE_READ) ? " mr" : "",
8092                 (flags & PAGE_WRITE_ORG) ? " mw" : "",
8093                 (flags & PAGE_EXEC) ? " me" : "",
8094                 mi->is_priv ? "" : " ms");
8095     }
8096 }
8097 
8098 /*
8099  * Callback for walk_memory_regions, when read_self_maps() fails.
8100  * Proceed without the benefit of host /proc/self/maps cross-check.
8101  */
8102 static int open_self_maps_3(void *opaque, target_ulong guest_start,
8103                             target_ulong guest_end, unsigned long flags)
8104 {
8105     static const MapInfo mi = { .is_priv = true };
8106 
8107     open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
8108     return 0;
8109 }
8110 
8111 /*
8112  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8113  */
8114 static int open_self_maps_2(void *opaque, target_ulong guest_start,
8115                             target_ulong guest_end, unsigned long flags)
8116 {
8117     const struct open_self_maps_data *d = opaque;
8118     uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
8119     uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
8120 
8121 #ifdef TARGET_X86_64
8122     /*
8123      * Because of the extremely high position of the page within the guest
8124      * virtual address space, this is not backed by host memory at all.
8125      * Therefore the loop below would fail.  This is the only instance
8126      * of not having host backing memory.
8127      */
8128     if (guest_start == TARGET_VSYSCALL_PAGE) {
8129         return open_self_maps_3(opaque, guest_start, guest_end, flags);
8130     }
8131 #endif
8132 
8133     while (1) {
8134         IntervalTreeNode *n =
8135             interval_tree_iter_first(d->host_maps, host_start, host_start);
8136         MapInfo *mi = container_of(n, MapInfo, itree);
8137         uintptr_t this_hlast = MIN(host_last, n->last);
8138         target_ulong this_gend = h2g(this_hlast) + 1;
8139 
8140         open_self_maps_4(d, mi, guest_start, this_gend, flags);
8141 
8142         if (this_hlast == host_last) {
8143             return 0;
8144         }
8145         host_start = this_hlast + 1;
8146         guest_start = h2g(host_start);
8147     }
8148 }
8149 
8150 static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
8151 {
8152     struct open_self_maps_data d = {
8153         .ts = get_task_state(env_cpu(env)),
8154         .host_maps = read_self_maps(),
8155         .fd = fd,
8156         .smaps = smaps
8157     };
8158 
8159     if (d.host_maps) {
8160         walk_memory_regions(&d, open_self_maps_2);
8161         free_self_maps(d.host_maps);
8162     } else {
8163         walk_memory_regions(&d, open_self_maps_3);
8164     }
8165     return 0;
8166 }
8167 
8168 static int open_self_maps(CPUArchState *cpu_env, int fd)
8169 {
8170     return open_self_maps_1(cpu_env, fd, false);
8171 }
8172 
8173 static int open_self_smaps(CPUArchState *cpu_env, int fd)
8174 {
8175     return open_self_maps_1(cpu_env, fd, true);
8176 }
8177 
8178 static int open_self_stat(CPUArchState *cpu_env, int fd)
8179 {
8180     CPUState *cpu = env_cpu(cpu_env);
8181     TaskState *ts = get_task_state(cpu);
8182     g_autoptr(GString) buf = g_string_new(NULL);
8183     int i;
8184 
8185     for (i = 0; i < 44; i++) {
8186         if (i == 0) {
8187             /* pid */
8188             g_string_printf(buf, FMT_pid " ", getpid());
8189         } else if (i == 1) {
8190             /* app name */
8191             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8192             bin = bin ? bin + 1 : ts->bprm->argv[0];
8193             g_string_printf(buf, "(%.15s) ", bin);
8194         } else if (i == 2) {
8195             /* task state */
8196             g_string_assign(buf, "R "); /* we are running right now */
8197         } else if (i == 3) {
8198             /* ppid */
8199             g_string_printf(buf, FMT_pid " ", getppid());
8200         } else if (i == 19) {
8201             /* num_threads */
8202             int cpus = 0;
8203             WITH_RCU_READ_LOCK_GUARD() {
8204                 CPUState *cpu_iter;
8205                 CPU_FOREACH(cpu_iter) {
8206                     cpus++;
8207                 }
8208             }
8209             g_string_printf(buf, "%d ", cpus);
8210         } else if (i == 21) {
8211             /* starttime */
8212             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8213         } else if (i == 27) {
8214             /* stack bottom */
8215             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8216         } else {
8217             /* for the rest, there is MasterCard */
8218             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8219         }
8220 
8221         if (write(fd, buf->str, buf->len) != buf->len) {
8222             return -1;
8223         }
8224     }
8225 
8226     return 0;
8227 }
8228 
8229 static int open_self_auxv(CPUArchState *cpu_env, int fd)
8230 {
8231     CPUState *cpu = env_cpu(cpu_env);
8232     TaskState *ts = get_task_state(cpu);
8233     abi_ulong auxv = ts->info->saved_auxv;
8234     abi_ulong len = ts->info->auxv_len;
8235     char *ptr;
8236 
8237     /*
8238      * Auxiliary vector is stored in target process stack.
8239      * read in whole auxv vector and copy it to file
8240      */
8241     ptr = lock_user(VERIFY_READ, auxv, len, 0);
8242     if (ptr != NULL) {
8243         while (len > 0) {
8244             ssize_t r;
8245             r = write(fd, ptr, len);
8246             if (r <= 0) {
8247                 break;
8248             }
8249             len -= r;
8250             ptr += r;
8251         }
8252         lseek(fd, 0, SEEK_SET);
8253         unlock_user(ptr, auxv, len);
8254     }
8255 
8256     return 0;
8257 }
8258 
8259 static int is_proc_myself(const char *filename, const char *entry)
8260 {
8261     if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
8262         filename += strlen("/proc/");
8263         if (!strncmp(filename, "self/", strlen("self/"))) {
8264             filename += strlen("self/");
8265         } else if (*filename >= '1' && *filename <= '9') {
8266             char myself[80];
8267             snprintf(myself, sizeof(myself), "%d/", getpid());
8268             if (!strncmp(filename, myself, strlen(myself))) {
8269                 filename += strlen(myself);
8270             } else {
8271                 return 0;
8272             }
8273         } else {
8274             return 0;
8275         }
8276         if (!strcmp(filename, entry)) {
8277             return 1;
8278         }
8279     }
8280     return 0;
8281 }
8282 
8283 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8284                       const char *fmt, int code)
8285 {
8286     if (logfile) {
8287         CPUState *cs = env_cpu(env);
8288 
8289         fprintf(logfile, fmt, code);
8290         fprintf(logfile, "Failing executable: %s\n", exec_path);
8291         cpu_dump_state(cs, logfile, 0);
8292         open_self_maps(env, fileno(logfile));
8293     }
8294 }
8295 
8296 void target_exception_dump(CPUArchState *env, const char *fmt, int code)
8297 {
8298     /* dump to console */
8299     excp_dump_file(stderr, env, fmt, code);
8300 
8301     /* dump to log file */
8302     if (qemu_log_separate()) {
8303         FILE *logfile = qemu_log_trylock();
8304 
8305         excp_dump_file(logfile, env, fmt, code);
8306         qemu_log_unlock(logfile);
8307     }
8308 }
8309 
8310 #include "target_proc.h"
8311 
8312 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8313     defined(HAVE_ARCH_PROC_CPUINFO) || \
8314     defined(HAVE_ARCH_PROC_HARDWARE)
8315 static int is_proc(const char *filename, const char *entry)
8316 {
8317     return strcmp(filename, entry) == 0;
8318 }
8319 #endif
8320 
8321 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8322 static int open_net_route(CPUArchState *cpu_env, int fd)
8323 {
8324     FILE *fp;
8325     char *line = NULL;
8326     size_t len = 0;
8327     ssize_t read;
8328 
8329     fp = fopen("/proc/net/route", "r");
8330     if (fp == NULL) {
8331         return -1;
8332     }
8333 
8334     /* read header */
8335 
8336     read = getline(&line, &len, fp);
8337     dprintf(fd, "%s", line);
8338 
8339     /* read routes */
8340 
8341     while ((read = getline(&line, &len, fp)) != -1) {
8342         char iface[16];
8343         uint32_t dest, gw, mask;
8344         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8345         int fields;
8346 
8347         fields = sscanf(line,
8348                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8349                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8350                         &mask, &mtu, &window, &irtt);
8351         if (fields != 11) {
8352             continue;
8353         }
8354         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8355                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8356                 metric, tswap32(mask), mtu, window, irtt);
8357     }
8358 
8359     free(line);
8360     fclose(fp);
8361 
8362     return 0;
8363 }
8364 #endif
8365 
8366 static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
8367                               const char *fname, int flags, mode_t mode,
8368                               int openat2_resolve, bool safe)
8369 {
8370     g_autofree char *proc_name = NULL;
8371     const char *pathname;
8372     struct fake_open {
8373         const char *filename;
8374         int (*fill)(CPUArchState *cpu_env, int fd);
8375         int (*cmp)(const char *s1, const char *s2);
8376     };
8377     const struct fake_open *fake_open;
8378     static const struct fake_open fakes[] = {
8379         { "maps", open_self_maps, is_proc_myself },
8380         { "smaps", open_self_smaps, is_proc_myself },
8381         { "stat", open_self_stat, is_proc_myself },
8382         { "auxv", open_self_auxv, is_proc_myself },
8383         { "cmdline", open_self_cmdline, is_proc_myself },
8384 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8385         { "/proc/net/route", open_net_route, is_proc },
8386 #endif
8387 #if defined(HAVE_ARCH_PROC_CPUINFO)
8388         { "/proc/cpuinfo", open_cpuinfo, is_proc },
8389 #endif
8390 #if defined(HAVE_ARCH_PROC_HARDWARE)
8391         { "/proc/hardware", open_hardware, is_proc },
8392 #endif
8393         { NULL, NULL, NULL }
8394     };
8395 
8396     /* if this is a file from /proc/ filesystem, expand full name */
8397     proc_name = realpath(fname, NULL);
8398     if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
8399         pathname = proc_name;
8400     } else {
8401         pathname = fname;
8402     }
8403 
8404     if (is_proc_myself(pathname, "exe")) {
8405         /* Honor openat2 resolve flags */
8406         if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
8407             (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
8408             errno = ELOOP;
8409             return -1;
8410         }
8411         if (safe) {
8412             return safe_openat(dirfd, exec_path, flags, mode);
8413         } else {
8414             return openat(dirfd, exec_path, flags, mode);
8415         }
8416     }
8417 
8418     for (fake_open = fakes; fake_open->filename; fake_open++) {
8419         if (fake_open->cmp(pathname, fake_open->filename)) {
8420             break;
8421         }
8422     }
8423 
8424     if (fake_open->filename) {
8425         const char *tmpdir;
8426         char filename[PATH_MAX];
8427         int fd, r;
8428 
8429         fd = memfd_create("qemu-open", 0);
8430         if (fd < 0) {
8431             if (errno != ENOSYS) {
8432                 return fd;
8433             }
8434             /* create temporary file to map stat to */
8435             tmpdir = getenv("TMPDIR");
8436             if (!tmpdir)
8437                 tmpdir = "/tmp";
8438             snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
8439             fd = mkstemp(filename);
8440             if (fd < 0) {
8441                 return fd;
8442             }
8443             unlink(filename);
8444         }
8445 
8446         if ((r = fake_open->fill(cpu_env, fd))) {
8447             int e = errno;
8448             close(fd);
8449             errno = e;
8450             return r;
8451         }
8452         lseek(fd, 0, SEEK_SET);
8453 
8454         return fd;
8455     }
8456 
8457     return -2;
8458 }
8459 
8460 int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
8461                     int flags, mode_t mode, bool safe)
8462 {
8463     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
8464     if (fd > -2) {
8465         return fd;
8466     }
8467 
8468     if (safe) {
8469         return safe_openat(dirfd, path(pathname), flags, mode);
8470     } else {
8471         return openat(dirfd, path(pathname), flags, mode);
8472     }
8473 }
8474 
8475 
8476 static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
8477                       abi_ptr guest_pathname, abi_ptr guest_open_how,
8478                       abi_ulong guest_size)
8479 {
8480     struct open_how_ver0 how = {0};
8481     char *pathname;
8482     int ret;
8483 
8484     if (guest_size < sizeof(struct target_open_how_ver0)) {
8485         return -TARGET_EINVAL;
8486     }
8487     ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
8488     if (ret) {
8489         if (ret == -TARGET_E2BIG) {
8490             qemu_log_mask(LOG_UNIMP,
8491                           "Unimplemented openat2 open_how size: "
8492                           TARGET_ABI_FMT_lu "\n", guest_size);
8493         }
8494         return ret;
8495     }
8496     pathname = lock_user_string(guest_pathname);
8497     if (!pathname) {
8498         return -TARGET_EFAULT;
8499     }
8500 
8501     how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
8502     how.mode = tswap64(how.mode);
8503     how.resolve = tswap64(how.resolve);
8504     int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
8505                                 how.resolve, true);
8506     if (fd > -2) {
8507         ret = get_errno(fd);
8508     } else {
8509         ret = get_errno(safe_openat2(dirfd, pathname, &how,
8510                                      sizeof(struct open_how_ver0)));
8511     }
8512 
8513     fd_trans_unregister(ret);
8514     unlock_user(pathname, guest_pathname, 0);
8515     return ret;
8516 }
8517 
8518 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8519 {
8520     ssize_t ret;
8521 
8522     if (!pathname || !buf) {
8523         errno = EFAULT;
8524         return -1;
8525     }
8526 
8527     if (!bufsiz) {
8528         /* Short circuit this for the magic exe check. */
8529         errno = EINVAL;
8530         return -1;
8531     }
8532 
8533     if (is_proc_myself((const char *)pathname, "exe")) {
8534         /*
8535          * Don't worry about sign mismatch as earlier mapping
8536          * logic would have thrown a bad address error.
8537          */
8538         ret = MIN(strlen(exec_path), bufsiz);
8539         /* We cannot NUL terminate the string. */
8540         memcpy(buf, exec_path, ret);
8541     } else {
8542         ret = readlink(path(pathname), buf, bufsiz);
8543     }
8544 
8545     return ret;
8546 }
8547 
8548 static int do_execv(CPUArchState *cpu_env, int dirfd,
8549                     abi_long pathname, abi_long guest_argp,
8550                     abi_long guest_envp, int flags, bool is_execveat)
8551 {
8552     int ret;
8553     char **argp, **envp;
8554     int argc, envc;
8555     abi_ulong gp;
8556     abi_ulong addr;
8557     char **q;
8558     void *p;
8559 
8560     argc = 0;
8561 
8562     for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
8563         if (get_user_ual(addr, gp)) {
8564             return -TARGET_EFAULT;
8565         }
8566         if (!addr) {
8567             break;
8568         }
8569         argc++;
8570     }
8571     envc = 0;
8572     for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
8573         if (get_user_ual(addr, gp)) {
8574             return -TARGET_EFAULT;
8575         }
8576         if (!addr) {
8577             break;
8578         }
8579         envc++;
8580     }
8581 
8582     argp = g_new0(char *, argc + 1);
8583     envp = g_new0(char *, envc + 1);
8584 
8585     for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
8586         if (get_user_ual(addr, gp)) {
8587             goto execve_efault;
8588         }
8589         if (!addr) {
8590             break;
8591         }
8592         *q = lock_user_string(addr);
8593         if (!*q) {
8594             goto execve_efault;
8595         }
8596     }
8597     *q = NULL;
8598 
8599     for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
8600         if (get_user_ual(addr, gp)) {
8601             goto execve_efault;
8602         }
8603         if (!addr) {
8604             break;
8605         }
8606         *q = lock_user_string(addr);
8607         if (!*q) {
8608             goto execve_efault;
8609         }
8610     }
8611     *q = NULL;
8612 
8613     /*
8614      * Although execve() is not an interruptible syscall it is
8615      * a special case where we must use the safe_syscall wrapper:
8616      * if we allow a signal to happen before we make the host
8617      * syscall then we will 'lose' it, because at the point of
8618      * execve the process leaves QEMU's control. So we use the
8619      * safe syscall wrapper to ensure that we either take the
8620      * signal as a guest signal, or else it does not happen
8621      * before the execve completes and makes it the other
8622      * program's problem.
8623      */
8624     p = lock_user_string(pathname);
8625     if (!p) {
8626         goto execve_efault;
8627     }
8628 
8629     const char *exe = p;
8630     if (is_proc_myself(p, "exe")) {
8631         exe = exec_path;
8632     }
8633     ret = is_execveat
8634         ? safe_execveat(dirfd, exe, argp, envp, flags)
8635         : safe_execve(exe, argp, envp);
8636     ret = get_errno(ret);
8637 
8638     unlock_user(p, pathname, 0);
8639 
8640     goto execve_end;
8641 
8642 execve_efault:
8643     ret = -TARGET_EFAULT;
8644 
8645 execve_end:
8646     for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
8647         if (get_user_ual(addr, gp) || !addr) {
8648             break;
8649         }
8650         unlock_user(*q, addr, 0);
8651     }
8652     for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
8653         if (get_user_ual(addr, gp) || !addr) {
8654             break;
8655         }
8656         unlock_user(*q, addr, 0);
8657     }
8658 
8659     g_free(argp);
8660     g_free(envp);
8661     return ret;
8662 }
8663 
8664 #define TIMER_MAGIC 0x0caf0000
8665 #define TIMER_MAGIC_MASK 0xffff0000
8666 
8667 /* Convert QEMU provided timer ID back to internal 16bit index format */
8668 static target_timer_t get_timer_id(abi_long arg)
8669 {
8670     target_timer_t timerid = arg;
8671 
8672     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8673         return -TARGET_EINVAL;
8674     }
8675 
8676     timerid &= 0xffff;
8677 
8678     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8679         return -TARGET_EINVAL;
8680     }
8681 
8682     return timerid;
8683 }
8684 
8685 static int target_to_host_cpu_mask(unsigned long *host_mask,
8686                                    size_t host_size,
8687                                    abi_ulong target_addr,
8688                                    size_t target_size)
8689 {
8690     unsigned target_bits = sizeof(abi_ulong) * 8;
8691     unsigned host_bits = sizeof(*host_mask) * 8;
8692     abi_ulong *target_mask;
8693     unsigned i, j;
8694 
8695     assert(host_size >= target_size);
8696 
8697     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8698     if (!target_mask) {
8699         return -TARGET_EFAULT;
8700     }
8701     memset(host_mask, 0, host_size);
8702 
8703     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8704         unsigned bit = i * target_bits;
8705         abi_ulong val;
8706 
8707         __get_user(val, &target_mask[i]);
8708         for (j = 0; j < target_bits; j++, bit++) {
8709             if (val & (1UL << j)) {
8710                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8711             }
8712         }
8713     }
8714 
8715     unlock_user(target_mask, target_addr, 0);
8716     return 0;
8717 }
8718 
8719 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8720                                    size_t host_size,
8721                                    abi_ulong target_addr,
8722                                    size_t target_size)
8723 {
8724     unsigned target_bits = sizeof(abi_ulong) * 8;
8725     unsigned host_bits = sizeof(*host_mask) * 8;
8726     abi_ulong *target_mask;
8727     unsigned i, j;
8728 
8729     assert(host_size >= target_size);
8730 
8731     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8732     if (!target_mask) {
8733         return -TARGET_EFAULT;
8734     }
8735 
8736     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8737         unsigned bit = i * target_bits;
8738         abi_ulong val = 0;
8739 
8740         for (j = 0; j < target_bits; j++, bit++) {
8741             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8742                 val |= 1UL << j;
8743             }
8744         }
8745         __put_user(val, &target_mask[i]);
8746     }
8747 
8748     unlock_user(target_mask, target_addr, target_size);
8749     return 0;
8750 }
8751 
8752 #ifdef TARGET_NR_getdents
8753 static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
8754 {
8755     g_autofree void *hdirp = NULL;
8756     void *tdirp;
8757     int hlen, hoff, toff;
8758     int hreclen, treclen;
8759     off_t prev_diroff = 0;
8760 
8761     hdirp = g_try_malloc(count);
8762     if (!hdirp) {
8763         return -TARGET_ENOMEM;
8764     }
8765 
8766 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8767     hlen = sys_getdents(dirfd, hdirp, count);
8768 #else
8769     hlen = sys_getdents64(dirfd, hdirp, count);
8770 #endif
8771 
8772     hlen = get_errno(hlen);
8773     if (is_error(hlen)) {
8774         return hlen;
8775     }
8776 
8777     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8778     if (!tdirp) {
8779         return -TARGET_EFAULT;
8780     }
8781 
8782     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8783 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8784         struct linux_dirent *hde = hdirp + hoff;
8785 #else
8786         struct linux_dirent64 *hde = hdirp + hoff;
8787 #endif
8788         struct target_dirent *tde = tdirp + toff;
8789         int namelen;
8790         uint8_t type;
8791 
8792         namelen = strlen(hde->d_name);
8793         hreclen = hde->d_reclen;
8794         treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
8795         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));
8796 
8797         if (toff + treclen > count) {
8798             /*
8799              * If the host struct is smaller than the target struct, or
8800              * requires less alignment and thus packs into less space,
8801              * then the host can return more entries than we can pass
8802              * on to the guest.
8803              */
8804             if (toff == 0) {
8805                 toff = -TARGET_EINVAL; /* result buffer is too small */
8806                 break;
8807             }
8808             /*
8809              * Return what we have, resetting the file pointer to the
8810              * location of the first record not returned.
8811              */
8812             lseek(dirfd, prev_diroff, SEEK_SET);
8813             break;
8814         }
8815 
8816         prev_diroff = hde->d_off;
8817         tde->d_ino = tswapal(hde->d_ino);
8818         tde->d_off = tswapal(hde->d_off);
8819         tde->d_reclen = tswap16(treclen);
8820         memcpy(tde->d_name, hde->d_name, namelen + 1);
8821 
8822         /*
8823          * The getdents type is in what was formerly a padding byte at the
8824          * end of the structure.
8825          */
8826 #ifdef EMULATE_GETDENTS_WITH_GETDENTS
8827         type = *((uint8_t *)hde + hreclen - 1);
8828 #else
8829         type = hde->d_type;
8830 #endif
8831         *((uint8_t *)tde + treclen - 1) = type;
8832     }
8833 
8834     unlock_user(tdirp, arg2, toff);
8835     return toff;
8836 }
8837 #endif /* TARGET_NR_getdents */
8838 
8839 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
8840 static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
8841 {
8842     g_autofree void *hdirp = NULL;
8843     void *tdirp;
8844     int hlen, hoff, toff;
8845     int hreclen, treclen;
8846     off_t prev_diroff = 0;
8847 
8848     hdirp = g_try_malloc(count);
8849     if (!hdirp) {
8850         return -TARGET_ENOMEM;
8851     }
8852 
8853     hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
8854     if (is_error(hlen)) {
8855         return hlen;
8856     }
8857 
8858     tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
8859     if (!tdirp) {
8860         return -TARGET_EFAULT;
8861     }
8862 
8863     for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
8864         struct linux_dirent64 *hde = hdirp + hoff;
8865         struct target_dirent64 *tde = tdirp + toff;
8866         int namelen;
8867 
8868         namelen = strlen(hde->d_name) + 1;
8869         hreclen = hde->d_reclen;
8870         treclen = offsetof(struct target_dirent64, d_name) + namelen;
8871         treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));
8872 
8873         if (toff + treclen > count) {
8874             /*
8875              * If the host struct is smaller than the target struct, or
8876              * requires less alignment and thus packs into less space,
8877              * then the host can return more entries than we can pass
8878              * on to the guest.
8879              */
8880             if (toff == 0) {
8881                 toff = -TARGET_EINVAL; /* result buffer is too small */
8882                 break;
8883             }
8884             /*
8885              * Return what we have, resetting the file pointer to the
8886              * location of the first record not returned.
8887              */
8888             lseek(dirfd, prev_diroff, SEEK_SET);
8889             break;
8890         }
8891 
8892         prev_diroff = hde->d_off;
8893         tde->d_ino = tswap64(hde->d_ino);
8894         tde->d_off = tswap64(hde->d_off);
8895         tde->d_reclen = tswap16(treclen);
8896         tde->d_type = hde->d_type;
8897         memcpy(tde->d_name, hde->d_name, namelen);
8898     }
8899 
8900     unlock_user(tdirp, arg2, toff);
8901     return toff;
8902 }
8903 #endif /* TARGET_NR_getdents64 */
8904 
8905 #if defined(TARGET_NR_riscv_hwprobe)
8906 
8907 #define RISCV_HWPROBE_KEY_MVENDORID     0
8908 #define RISCV_HWPROBE_KEY_MARCHID       1
8909 #define RISCV_HWPROBE_KEY_MIMPID        2
8910 
8911 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8912 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8913 
8914 #define RISCV_HWPROBE_KEY_IMA_EXT_0         4
8915 #define     RISCV_HWPROBE_IMA_FD            (1 << 0)
8916 #define     RISCV_HWPROBE_IMA_C             (1 << 1)
8917 #define     RISCV_HWPROBE_IMA_V             (1 << 2)
8918 #define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
8919 #define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
8920 #define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
8921 #define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
8922 #define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
8923 #define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
8924 #define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
8925 #define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
8926 #define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
8927 #define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
8928 #define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
8929 #define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
8930 #define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
8931 #define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
8932 #define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
8933 #define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
8934 #define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
8935 #define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
8936 #define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
8937 #define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
8938 #define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
8939 #define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
8940 #define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
8941 #define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
8942 #define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
8943 #define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
8944 #define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
8945 #define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
8946 #define     RISCV_HWPROBE_EXT_ZVFHMIN       (1 << 31)
8947 #define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
8948 #define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
8949 #define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
8950 #define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)
8951 
8952 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
8953 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
8954 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
8955 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
8956 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
8957 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
8958 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
8959 
8960 #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
8961 
8962 struct riscv_hwprobe {
8963     abi_llong  key;
8964     abi_ullong value;
8965 };
8966 
8967 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
8968                                     struct riscv_hwprobe *pair,
8969                                     size_t pair_count)
8970 {
8971     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
8972 
8973     for (; pair_count > 0; pair_count--, pair++) {
8974         abi_llong key;
8975         abi_ullong value;
8976         __put_user(0, &pair->value);
8977         __get_user(key, &pair->key);
8978         switch (key) {
8979         case RISCV_HWPROBE_KEY_MVENDORID:
8980             __put_user(cfg->mvendorid, &pair->value);
8981             break;
8982         case RISCV_HWPROBE_KEY_MARCHID:
8983             __put_user(cfg->marchid, &pair->value);
8984             break;
8985         case RISCV_HWPROBE_KEY_MIMPID:
8986             __put_user(cfg->mimpid, &pair->value);
8987             break;
8988         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
8989             value = riscv_has_ext(env, RVI) &&
8990                     riscv_has_ext(env, RVM) &&
8991                     riscv_has_ext(env, RVA) ?
8992                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
8993             __put_user(value, &pair->value);
8994             break;
8995         case RISCV_HWPROBE_KEY_IMA_EXT_0:
8996             value = riscv_has_ext(env, RVF) &&
8997                     riscv_has_ext(env, RVD) ?
8998                     RISCV_HWPROBE_IMA_FD : 0;
8999             value |= riscv_has_ext(env, RVC) ?
9000                      RISCV_HWPROBE_IMA_C : 0;
9001             value |= riscv_has_ext(env, RVV) ?
9002                      RISCV_HWPROBE_IMA_V : 0;
9003             value |= cfg->ext_zba ?
9004                      RISCV_HWPROBE_EXT_ZBA : 0;
9005             value |= cfg->ext_zbb ?
9006                      RISCV_HWPROBE_EXT_ZBB : 0;
9007             value |= cfg->ext_zbs ?
9008                      RISCV_HWPROBE_EXT_ZBS : 0;
9009             value |= cfg->ext_zicboz ?
9010                      RISCV_HWPROBE_EXT_ZICBOZ : 0;
9011             value |= cfg->ext_zbc ?
9012                      RISCV_HWPROBE_EXT_ZBC : 0;
9013             value |= cfg->ext_zbkb ?
9014                      RISCV_HWPROBE_EXT_ZBKB : 0;
9015             value |= cfg->ext_zbkc ?
9016                      RISCV_HWPROBE_EXT_ZBKC : 0;
9017             value |= cfg->ext_zbkx ?
9018                      RISCV_HWPROBE_EXT_ZBKX : 0;
9019             value |= cfg->ext_zknd ?
9020                      RISCV_HWPROBE_EXT_ZKND : 0;
9021             value |= cfg->ext_zkne ?
9022                      RISCV_HWPROBE_EXT_ZKNE : 0;
9023             value |= cfg->ext_zknh ?
9024                      RISCV_HWPROBE_EXT_ZKNH : 0;
9025             value |= cfg->ext_zksed ?
9026                      RISCV_HWPROBE_EXT_ZKSED : 0;
9027             value |= cfg->ext_zksh ?
9028                      RISCV_HWPROBE_EXT_ZKSH : 0;
9029             value |= cfg->ext_zkt ?
9030                      RISCV_HWPROBE_EXT_ZKT : 0;
9031             value |= cfg->ext_zvbb ?
9032                      RISCV_HWPROBE_EXT_ZVBB : 0;
9033             value |= cfg->ext_zvbc ?
9034                      RISCV_HWPROBE_EXT_ZVBC : 0;
9035             value |= cfg->ext_zvkb ?
9036                      RISCV_HWPROBE_EXT_ZVKB : 0;
9037             value |= cfg->ext_zvkg ?
9038                      RISCV_HWPROBE_EXT_ZVKG : 0;
9039             value |= cfg->ext_zvkned ?
9040                      RISCV_HWPROBE_EXT_ZVKNED : 0;
9041             value |= cfg->ext_zvknha ?
9042                      RISCV_HWPROBE_EXT_ZVKNHA : 0;
9043             value |= cfg->ext_zvknhb ?
9044                      RISCV_HWPROBE_EXT_ZVKNHB : 0;
9045             value |= cfg->ext_zvksed ?
9046                      RISCV_HWPROBE_EXT_ZVKSED : 0;
9047             value |= cfg->ext_zvksh ?
9048                      RISCV_HWPROBE_EXT_ZVKSH : 0;
9049             value |= cfg->ext_zvkt ?
9050                      RISCV_HWPROBE_EXT_ZVKT : 0;
9051             value |= cfg->ext_zfh ?
9052                      RISCV_HWPROBE_EXT_ZFH : 0;
9053             value |= cfg->ext_zfhmin ?
9054                      RISCV_HWPROBE_EXT_ZFHMIN : 0;
9055             value |= cfg->ext_zihintntl ?
9056                      RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
9057             value |= cfg->ext_zvfh ?
9058                      RISCV_HWPROBE_EXT_ZVFH : 0;
9059             value |= cfg->ext_zvfhmin ?
9060                      RISCV_HWPROBE_EXT_ZVFHMIN : 0;
9061             value |= cfg->ext_zfa ?
9062                      RISCV_HWPROBE_EXT_ZFA : 0;
9063             value |= cfg->ext_ztso ?
9064                      RISCV_HWPROBE_EXT_ZTSO : 0;
9065             value |= cfg->ext_zacas ?
9066                      RISCV_HWPROBE_EXT_ZACAS : 0;
9067             value |= cfg->ext_zicond ?
9068                      RISCV_HWPROBE_EXT_ZICOND : 0;
9069             __put_user(value, &pair->value);
9070             break;
9071         case RISCV_HWPROBE_KEY_CPUPERF_0:
9072             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9073             break;
9074         case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
9075             value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
9076             __put_user(value, &pair->value);
9077             break;
9078         default:
9079             __put_user(-1, &pair->key);
9080             break;
9081         }
9082     }
9083 }
9084 
9085 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9086 {
9087     int ret, i, tmp;
9088     size_t host_mask_size, target_mask_size;
9089     unsigned long *host_mask;
9090 
9091     /*
9092      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9093      * arg3 contains the cpu count.
9094      */
9095     tmp = (8 * sizeof(abi_ulong));
9096     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9097     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9098                      ~(sizeof(*host_mask) - 1);
9099 
9100     host_mask = alloca(host_mask_size);
9101 
9102     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9103                                   arg4, target_mask_size);
9104     if (ret != 0) {
9105         return ret;
9106     }
9107 
9108     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9109         if (host_mask[i] != 0) {
9110             return 0;
9111         }
9112     }
9113     return -TARGET_EINVAL;
9114 }
9115 
9116 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9117                                  abi_long arg2, abi_long arg3,
9118                                  abi_long arg4, abi_long arg5)
9119 {
9120     int ret;
9121     struct riscv_hwprobe *host_pairs;
9122 
9123     /* flags must be 0 */
9124     if (arg5 != 0) {
9125         return -TARGET_EINVAL;
9126     }
9127 
9128     /* check cpu_set */
9129     if (arg3 != 0) {
9130         ret = cpu_set_valid(arg3, arg4);
9131         if (ret != 0) {
9132             return ret;
9133         }
9134     } else if (arg4 != 0) {
9135         return -TARGET_EINVAL;
9136     }
9137 
9138     /* no pairs */
9139     if (arg2 == 0) {
9140         return 0;
9141     }
9142 
9143     host_pairs = lock_user(VERIFY_WRITE, arg1,
9144                            sizeof(*host_pairs) * (size_t)arg2, 0);
9145     if (host_pairs == NULL) {
9146         return -TARGET_EFAULT;
9147     }
9148     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9149     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9150     return 0;
9151 }
9152 #endif /* TARGET_NR_riscv_hwprobe */
9153 
9154 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9155 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9156 #endif
9157 
9158 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9159 #define __NR_sys_open_tree __NR_open_tree
9160 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9161           unsigned int, __flags)
9162 #endif
9163 
9164 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9165 #define __NR_sys_move_mount __NR_move_mount
9166 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9167            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9168 #endif
9169 
9170 /* This is an internal helper for do_syscall so that it is easier
9171  * to have a single return point, so that actions, such as logging
9172  * of syscall results, can be performed.
9173  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9174  */
9175 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9176                             abi_long arg2, abi_long arg3, abi_long arg4,
9177                             abi_long arg5, abi_long arg6, abi_long arg7,
9178                             abi_long arg8)
9179 {
9180     CPUState *cpu = env_cpu(cpu_env);
9181     abi_long ret;
9182 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9183     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9184     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9185     || defined(TARGET_NR_statx)
9186     struct stat st;
9187 #endif
9188 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9189     || defined(TARGET_NR_fstatfs)
9190     struct statfs stfs;
9191 #endif
9192     void *p;
9193 
9194     switch(num) {
9195     case TARGET_NR_exit:
9196         /* In old applications this may be used to implement _exit(2).
9197            However in threaded applications it is used for thread termination,
9198            and _exit_group is used for application termination.
9199            Do thread termination if we have more then one thread.  */
9200 
9201         if (block_signals()) {
9202             return -QEMU_ERESTARTSYS;
9203         }
9204 
9205         pthread_mutex_lock(&clone_lock);
9206 
9207         if (CPU_NEXT(first_cpu)) {
9208             TaskState *ts = get_task_state(cpu);
9209 
9210             if (ts->child_tidptr) {
9211                 put_user_u32(0, ts->child_tidptr);
9212                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9213                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9214             }
9215 
9216             object_unparent(OBJECT(cpu));
9217             object_unref(OBJECT(cpu));
9218             /*
9219              * At this point the CPU should be unrealized and removed
9220              * from cpu lists. We can clean-up the rest of the thread
9221              * data without the lock held.
9222              */
9223 
9224             pthread_mutex_unlock(&clone_lock);
9225 
9226             thread_cpu = NULL;
9227             g_free(ts);
9228             rcu_unregister_thread();
9229             pthread_exit(NULL);
9230         }
9231 
9232         pthread_mutex_unlock(&clone_lock);
9233         preexit_cleanup(cpu_env, arg1);
9234         _exit(arg1);
9235         return 0; /* avoid warning */
9236     case TARGET_NR_read:
9237         if (arg2 == 0 && arg3 == 0) {
9238             return get_errno(safe_read(arg1, 0, 0));
9239         } else {
9240             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9241                 return -TARGET_EFAULT;
9242             ret = get_errno(safe_read(arg1, p, arg3));
9243             if (ret >= 0 &&
9244                 fd_trans_host_to_target_data(arg1)) {
9245                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9246             }
9247             unlock_user(p, arg2, ret);
9248         }
9249         return ret;
9250     case TARGET_NR_write:
9251         if (arg2 == 0 && arg3 == 0) {
9252             return get_errno(safe_write(arg1, 0, 0));
9253         }
9254         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9255             return -TARGET_EFAULT;
9256         if (fd_trans_target_to_host_data(arg1)) {
9257             void *copy = g_malloc(arg3);
9258             memcpy(copy, p, arg3);
9259             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9260             if (ret >= 0) {
9261                 ret = get_errno(safe_write(arg1, copy, ret));
9262             }
9263             g_free(copy);
9264         } else {
9265             ret = get_errno(safe_write(arg1, p, arg3));
9266         }
9267         unlock_user(p, arg2, 0);
9268         return ret;
9269 
9270 #ifdef TARGET_NR_open
9271     case TARGET_NR_open:
9272         if (!(p = lock_user_string(arg1)))
9273             return -TARGET_EFAULT;
9274         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9275                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9276                                   arg3, true));
9277         fd_trans_unregister(ret);
9278         unlock_user(p, arg1, 0);
9279         return ret;
9280 #endif
9281     case TARGET_NR_openat:
9282         if (!(p = lock_user_string(arg2)))
9283             return -TARGET_EFAULT;
9284         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9285                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9286                                   arg4, true));
9287         fd_trans_unregister(ret);
9288         unlock_user(p, arg2, 0);
9289         return ret;
9290     case TARGET_NR_openat2:
9291         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9292         return ret;
9293 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9294     case TARGET_NR_name_to_handle_at:
9295         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9296         return ret;
9297 #endif
9298 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9299     case TARGET_NR_open_by_handle_at:
9300         ret = do_open_by_handle_at(arg1, arg2, arg3);
9301         fd_trans_unregister(ret);
9302         return ret;
9303 #endif
9304 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9305     case TARGET_NR_pidfd_open:
9306         return get_errno(pidfd_open(arg1, arg2));
9307 #endif
9308 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9309     case TARGET_NR_pidfd_send_signal:
9310         {
9311             siginfo_t uinfo, *puinfo;
9312 
9313             if (arg3) {
9314                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9315                 if (!p) {
9316                     return -TARGET_EFAULT;
9317                  }
9318                  target_to_host_siginfo(&uinfo, p);
9319                  unlock_user(p, arg3, 0);
9320                  puinfo = &uinfo;
9321             } else {
9322                  puinfo = NULL;
9323             }
9324             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9325                                               puinfo, arg4));
9326         }
9327         return ret;
9328 #endif
9329 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9330     case TARGET_NR_pidfd_getfd:
9331         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9332 #endif
9333     case TARGET_NR_close:
9334         fd_trans_unregister(arg1);
9335         return get_errno(close(arg1));
9336 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9337     case TARGET_NR_close_range:
9338         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9339         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9340             abi_long fd, maxfd;
9341             maxfd = MIN(arg2, target_fd_max);
9342             for (fd = arg1; fd < maxfd; fd++) {
9343                 fd_trans_unregister(fd);
9344             }
9345         }
9346         return ret;
9347 #endif
9348 
9349     case TARGET_NR_brk:
9350         return do_brk(arg1);
9351 #ifdef TARGET_NR_fork
9352     case TARGET_NR_fork:
9353         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9354 #endif
9355 #ifdef TARGET_NR_waitpid
9356     case TARGET_NR_waitpid:
9357         {
9358             int status;
9359             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9360             if (!is_error(ret) && arg2 && ret
9361                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9362                 return -TARGET_EFAULT;
9363         }
9364         return ret;
9365 #endif
9366 #ifdef TARGET_NR_waitid
9367     case TARGET_NR_waitid:
9368         {
9369             struct rusage ru;
9370             siginfo_t info;
9371 
9372             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9373                                         arg4, (arg5 ? &ru : NULL)));
9374             if (!is_error(ret)) {
9375                 if (arg3) {
9376                     p = lock_user(VERIFY_WRITE, arg3,
9377                                   sizeof(target_siginfo_t), 0);
9378                     if (!p) {
9379                         return -TARGET_EFAULT;
9380                     }
9381                     host_to_target_siginfo(p, &info);
9382                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9383                 }
9384                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9385                     return -TARGET_EFAULT;
9386                 }
9387             }
9388         }
9389         return ret;
9390 #endif
9391 #ifdef TARGET_NR_creat /* not on alpha */
9392     case TARGET_NR_creat:
9393         if (!(p = lock_user_string(arg1)))
9394             return -TARGET_EFAULT;
9395         ret = get_errno(creat(p, arg2));
9396         fd_trans_unregister(ret);
9397         unlock_user(p, arg1, 0);
9398         return ret;
9399 #endif
9400 #ifdef TARGET_NR_link
9401     case TARGET_NR_link:
9402         {
9403             void * p2;
9404             p = lock_user_string(arg1);
9405             p2 = lock_user_string(arg2);
9406             if (!p || !p2)
9407                 ret = -TARGET_EFAULT;
9408             else
9409                 ret = get_errno(link(p, p2));
9410             unlock_user(p2, arg2, 0);
9411             unlock_user(p, arg1, 0);
9412         }
9413         return ret;
9414 #endif
9415 #if defined(TARGET_NR_linkat)
9416     case TARGET_NR_linkat:
9417         {
9418             void * p2 = NULL;
9419             if (!arg2 || !arg4)
9420                 return -TARGET_EFAULT;
9421             p  = lock_user_string(arg2);
9422             p2 = lock_user_string(arg4);
9423             if (!p || !p2)
9424                 ret = -TARGET_EFAULT;
9425             else
9426                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9427             unlock_user(p, arg2, 0);
9428             unlock_user(p2, arg4, 0);
9429         }
9430         return ret;
9431 #endif
9432 #ifdef TARGET_NR_unlink
9433     case TARGET_NR_unlink:
9434         if (!(p = lock_user_string(arg1)))
9435             return -TARGET_EFAULT;
9436         ret = get_errno(unlink(p));
9437         unlock_user(p, arg1, 0);
9438         return ret;
9439 #endif
9440 #if defined(TARGET_NR_unlinkat)
9441     case TARGET_NR_unlinkat:
9442         if (!(p = lock_user_string(arg2)))
9443             return -TARGET_EFAULT;
9444         ret = get_errno(unlinkat(arg1, p, arg3));
9445         unlock_user(p, arg2, 0);
9446         return ret;
9447 #endif
9448     case TARGET_NR_execveat:
9449         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9450     case TARGET_NR_execve:
9451         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9452     case TARGET_NR_chdir:
9453         if (!(p = lock_user_string(arg1)))
9454             return -TARGET_EFAULT;
9455         ret = get_errno(chdir(p));
9456         unlock_user(p, arg1, 0);
9457         return ret;
9458 #ifdef TARGET_NR_time
9459     case TARGET_NR_time:
9460         {
9461             time_t host_time;
9462             ret = get_errno(time(&host_time));
9463             if (!is_error(ret)
9464                 && arg1
9465                 && put_user_sal(host_time, arg1))
9466                 return -TARGET_EFAULT;
9467         }
9468         return ret;
9469 #endif
9470 #ifdef TARGET_NR_mknod
9471     case TARGET_NR_mknod:
9472         if (!(p = lock_user_string(arg1)))
9473             return -TARGET_EFAULT;
9474         ret = get_errno(mknod(p, arg2, arg3));
9475         unlock_user(p, arg1, 0);
9476         return ret;
9477 #endif
9478 #if defined(TARGET_NR_mknodat)
9479     case TARGET_NR_mknodat:
9480         if (!(p = lock_user_string(arg2)))
9481             return -TARGET_EFAULT;
9482         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9483         unlock_user(p, arg2, 0);
9484         return ret;
9485 #endif
9486 #ifdef TARGET_NR_chmod
9487     case TARGET_NR_chmod:
9488         if (!(p = lock_user_string(arg1)))
9489             return -TARGET_EFAULT;
9490         ret = get_errno(chmod(p, arg2));
9491         unlock_user(p, arg1, 0);
9492         return ret;
9493 #endif
9494 #ifdef TARGET_NR_lseek
9495     case TARGET_NR_lseek:
9496         return get_errno(lseek(arg1, arg2, arg3));
9497 #endif
9498 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9499     /* Alpha specific */
9500     case TARGET_NR_getxpid:
9501         cpu_env->ir[IR_A4] = getppid();
9502         return get_errno(getpid());
9503 #endif
9504 #ifdef TARGET_NR_getpid
9505     case TARGET_NR_getpid:
9506         return get_errno(getpid());
9507 #endif
9508     case TARGET_NR_mount:
9509         {
9510             /* need to look at the data field */
9511             void *p2, *p3;
9512 
9513             if (arg1) {
9514                 p = lock_user_string(arg1);
9515                 if (!p) {
9516                     return -TARGET_EFAULT;
9517                 }
9518             } else {
9519                 p = NULL;
9520             }
9521 
9522             p2 = lock_user_string(arg2);
9523             if (!p2) {
9524                 if (arg1) {
9525                     unlock_user(p, arg1, 0);
9526                 }
9527                 return -TARGET_EFAULT;
9528             }
9529 
9530             if (arg3) {
9531                 p3 = lock_user_string(arg3);
9532                 if (!p3) {
9533                     if (arg1) {
9534                         unlock_user(p, arg1, 0);
9535                     }
9536                     unlock_user(p2, arg2, 0);
9537                     return -TARGET_EFAULT;
9538                 }
9539             } else {
9540                 p3 = NULL;
9541             }
9542 
9543             /* FIXME - arg5 should be locked, but it isn't clear how to
9544              * do that since it's not guaranteed to be a NULL-terminated
9545              * string.
9546              */
9547             if (!arg5) {
9548                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9549             } else {
9550                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9551             }
9552             ret = get_errno(ret);
9553 
9554             if (arg1) {
9555                 unlock_user(p, arg1, 0);
9556             }
9557             unlock_user(p2, arg2, 0);
9558             if (arg3) {
9559                 unlock_user(p3, arg3, 0);
9560             }
9561         }
9562         return ret;
9563 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9564 #if defined(TARGET_NR_umount)
9565     case TARGET_NR_umount:
9566 #endif
9567 #if defined(TARGET_NR_oldumount)
9568     case TARGET_NR_oldumount:
9569 #endif
9570         if (!(p = lock_user_string(arg1)))
9571             return -TARGET_EFAULT;
9572         ret = get_errno(umount(p));
9573         unlock_user(p, arg1, 0);
9574         return ret;
9575 #endif
9576 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9577     case TARGET_NR_move_mount:
9578         {
9579             void *p2, *p4;
9580 
9581             if (!arg2 || !arg4) {
9582                 return -TARGET_EFAULT;
9583             }
9584 
9585             p2 = lock_user_string(arg2);
9586             if (!p2) {
9587                 return -TARGET_EFAULT;
9588             }
9589 
9590             p4 = lock_user_string(arg4);
9591             if (!p4) {
9592                 unlock_user(p2, arg2, 0);
9593                 return -TARGET_EFAULT;
9594             }
9595             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9596 
9597             unlock_user(p2, arg2, 0);
9598             unlock_user(p4, arg4, 0);
9599 
9600             return ret;
9601         }
9602 #endif
9603 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9604     case TARGET_NR_open_tree:
9605         {
9606             void *p2;
9607             int host_flags;
9608 
9609             if (!arg2) {
9610                 return -TARGET_EFAULT;
9611             }
9612 
9613             p2 = lock_user_string(arg2);
9614             if (!p2) {
9615                 return -TARGET_EFAULT;
9616             }
9617 
9618             host_flags = arg3 & ~TARGET_O_CLOEXEC;
9619             if (arg3 & TARGET_O_CLOEXEC) {
9620                 host_flags |= O_CLOEXEC;
9621             }
9622 
9623             ret = get_errno(sys_open_tree(arg1, p2, host_flags));
9624 
9625             unlock_user(p2, arg2, 0);
9626 
9627             return ret;
9628         }
9629 #endif
9630 #ifdef TARGET_NR_stime /* not on alpha */
9631     case TARGET_NR_stime:
9632         {
9633             struct timespec ts;
9634             ts.tv_nsec = 0;
9635             if (get_user_sal(ts.tv_sec, arg1)) {
9636                 return -TARGET_EFAULT;
9637             }
9638             return get_errno(clock_settime(CLOCK_REALTIME, &ts));
9639         }
9640 #endif
9641 #ifdef TARGET_NR_alarm /* not on alpha */
9642     case TARGET_NR_alarm:
9643         return alarm(arg1);
9644 #endif
9645 #ifdef TARGET_NR_pause /* not on alpha */
9646     case TARGET_NR_pause:
9647         if (!block_signals()) {
9648             sigsuspend(&get_task_state(cpu)->signal_mask);
9649         }
9650         return -TARGET_EINTR;
9651 #endif
9652 #ifdef TARGET_NR_utime
9653     case TARGET_NR_utime:
9654         {
9655             struct utimbuf tbuf, *host_tbuf;
9656             struct target_utimbuf *target_tbuf;
9657             if (arg2) {
9658                 if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
9659                     return -TARGET_EFAULT;
9660                 tbuf.actime = tswapal(target_tbuf->actime);
9661                 tbuf.modtime = tswapal(target_tbuf->modtime);
9662                 unlock_user_struct(target_tbuf, arg2, 0);
9663                 host_tbuf = &tbuf;
9664             } else {
9665                 host_tbuf = NULL;
9666             }
9667             if (!(p = lock_user_string(arg1)))
9668                 return -TARGET_EFAULT;
9669             ret = get_errno(utime(p, host_tbuf));
9670             unlock_user(p, arg1, 0);
9671         }
9672         return ret;
9673 #endif
9674 #ifdef TARGET_NR_utimes
9675     case TARGET_NR_utimes:
9676         {
9677             struct timeval *tvp, tv[2];
9678             if (arg2) {
9679                 if (copy_from_user_timeval(&tv[0], arg2)
9680                     || copy_from_user_timeval(&tv[1],
9681                                               arg2 + sizeof(struct target_timeval)))
9682                     return -TARGET_EFAULT;
9683                 tvp = tv;
9684             } else {
9685                 tvp = NULL;
9686             }
9687             if (!(p = lock_user_string(arg1)))
9688                 return -TARGET_EFAULT;
9689             ret = get_errno(utimes(p, tvp));
9690             unlock_user(p, arg1, 0);
9691         }
9692         return ret;
9693 #endif
9694 #if defined(TARGET_NR_futimesat)
9695     case TARGET_NR_futimesat:
9696         {
9697             struct timeval *tvp, tv[2];
9698             if (arg3) {
9699                 if (copy_from_user_timeval(&tv[0], arg3)
9700                     || copy_from_user_timeval(&tv[1],
9701                                               arg3 + sizeof(struct target_timeval)))
9702                     return -TARGET_EFAULT;
9703                 tvp = tv;
9704             } else {
9705                 tvp = NULL;
9706             }
9707             if (!(p = lock_user_string(arg2))) {
9708                 return -TARGET_EFAULT;
9709             }
9710             ret = get_errno(futimesat(arg1, path(p), tvp));
9711             unlock_user(p, arg2, 0);
9712         }
9713         return ret;
9714 #endif
9715 #ifdef TARGET_NR_access
9716     case TARGET_NR_access:
9717         if (!(p = lock_user_string(arg1))) {
9718             return -TARGET_EFAULT;
9719         }
9720         ret = get_errno(access(path(p), arg2));
9721         unlock_user(p, arg1, 0);
9722         return ret;
9723 #endif
9724 #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
9725     case TARGET_NR_faccessat:
9726         if (!(p = lock_user_string(arg2))) {
9727             return -TARGET_EFAULT;
9728         }
9729         ret = get_errno(faccessat(arg1, p, arg3, 0));
9730         unlock_user(p, arg2, 0);
9731         return ret;
9732 #endif
9733 #if defined(TARGET_NR_faccessat2)
9734     case TARGET_NR_faccessat2:
9735         if (!(p = lock_user_string(arg2))) {
9736             return -TARGET_EFAULT;
9737         }
9738         ret = get_errno(faccessat(arg1, p, arg3, arg4));
9739         unlock_user(p, arg2, 0);
9740         return ret;
9741 #endif
9742 #ifdef TARGET_NR_nice /* not on alpha */
9743     case TARGET_NR_nice:
9744         return get_errno(nice(arg1));
9745 #endif
9746     case TARGET_NR_sync:
9747         sync();
9748         return 0;
9749 #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
9750     case TARGET_NR_syncfs:
9751         return get_errno(syncfs(arg1));
9752 #endif
9753     case TARGET_NR_kill:
9754         return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
9755 #ifdef TARGET_NR_rename
9756     case TARGET_NR_rename:
9757         {
9758             void *p2;
9759             p = lock_user_string(arg1);
9760             p2 = lock_user_string(arg2);
9761             if (!p || !p2)
9762                 ret = -TARGET_EFAULT;
9763             else
9764                 ret = get_errno(rename(p, p2));
9765             unlock_user(p2, arg2, 0);
9766             unlock_user(p, arg1, 0);
9767         }
9768         return ret;
9769 #endif
9770 #if defined(TARGET_NR_renameat)
9771     case TARGET_NR_renameat:
9772         {
9773             void *p2;
9774             p  = lock_user_string(arg2);
9775             p2 = lock_user_string(arg4);
9776             if (!p || !p2)
9777                 ret = -TARGET_EFAULT;
9778             else
9779                 ret = get_errno(renameat(arg1, p, arg3, p2));
9780             unlock_user(p2, arg4, 0);
9781             unlock_user(p, arg2, 0);
9782         }
9783         return ret;
9784 #endif
9785 #if defined(TARGET_NR_renameat2)
9786     case TARGET_NR_renameat2:
9787         {
9788             void *p2;
9789             p  = lock_user_string(arg2);
9790             p2 = lock_user_string(arg4);
9791             if (!p || !p2) {
9792                 ret = -TARGET_EFAULT;
9793             } else {
9794                 ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
9795             }
9796             unlock_user(p2, arg4, 0);
9797             unlock_user(p, arg2, 0);
9798         }
9799         return ret;
9800 #endif
9801 #ifdef TARGET_NR_mkdir
9802     case TARGET_NR_mkdir:
9803         if (!(p = lock_user_string(arg1)))
9804             return -TARGET_EFAULT;
9805         ret = get_errno(mkdir(p, arg2));
9806         unlock_user(p, arg1, 0);
9807         return ret;
9808 #endif
9809 #if defined(TARGET_NR_mkdirat)
9810     case TARGET_NR_mkdirat:
9811         if (!(p = lock_user_string(arg2)))
9812             return -TARGET_EFAULT;
9813         ret = get_errno(mkdirat(arg1, p, arg3));
9814         unlock_user(p, arg2, 0);
9815         return ret;
9816 #endif
9817 #ifdef TARGET_NR_rmdir
9818     case TARGET_NR_rmdir:
9819         if (!(p = lock_user_string(arg1)))
9820             return -TARGET_EFAULT;
9821         ret = get_errno(rmdir(p));
9822         unlock_user(p, arg1, 0);
9823         return ret;
9824 #endif
9825     case TARGET_NR_dup:
9826         ret = get_errno(dup(arg1));
9827         if (ret >= 0) {
9828             fd_trans_dup(arg1, ret);
9829         }
9830         return ret;
9831 #ifdef TARGET_NR_pipe
9832     case TARGET_NR_pipe:
9833         return do_pipe(cpu_env, arg1, 0, 0);
9834 #endif
9835 #ifdef TARGET_NR_pipe2
9836     case TARGET_NR_pipe2:
9837         return do_pipe(cpu_env, arg1,
9838                        target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
9839 #endif
9840     case TARGET_NR_times:
9841         {
9842             struct target_tms *tmsp;
9843             struct tms tms;
9844             ret = get_errno(times(&tms));
9845             if (arg1) {
9846                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9847                 if (!tmsp)
9848                     return -TARGET_EFAULT;
9849                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9850                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9851                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9852                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9853             }
9854             if (!is_error(ret))
9855                 ret = host_to_target_clock_t(ret);
9856         }
9857         return ret;
9858     case TARGET_NR_acct:
9859         if (arg1 == 0) {
9860             ret = get_errno(acct(NULL));
9861         } else {
9862             if (!(p = lock_user_string(arg1))) {
9863                 return -TARGET_EFAULT;
9864             }
9865             ret = get_errno(acct(path(p)));
9866             unlock_user(p, arg1, 0);
9867         }
9868         return ret;
9869 #ifdef TARGET_NR_umount2
9870     case TARGET_NR_umount2:
9871         if (!(p = lock_user_string(arg1)))
9872             return -TARGET_EFAULT;
9873         ret = get_errno(umount2(p, arg2));
9874         unlock_user(p, arg1, 0);
9875         return ret;
9876 #endif
9877     case TARGET_NR_ioctl:
9878         return do_ioctl(arg1, arg2, arg3);
9879 #ifdef TARGET_NR_fcntl
9880     case TARGET_NR_fcntl:
9881         return do_fcntl(arg1, arg2, arg3);
9882 #endif
9883     case TARGET_NR_setpgid:
9884         return get_errno(setpgid(arg1, arg2));
9885     case TARGET_NR_umask:
9886         return get_errno(umask(arg1));
9887     case TARGET_NR_chroot:
9888         if (!(p = lock_user_string(arg1)))
9889             return -TARGET_EFAULT;
9890         ret = get_errno(chroot(p));
9891         unlock_user(p, arg1, 0);
9892         return ret;
9893 #ifdef TARGET_NR_dup2
9894     case TARGET_NR_dup2:
9895         ret = get_errno(dup2(arg1, arg2));
9896         if (ret >= 0) {
9897             fd_trans_dup(arg1, arg2);
9898         }
9899         return ret;
9900 #endif
9901 #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
9902     case TARGET_NR_dup3:
9903     {
9904         int host_flags;
9905 
9906         if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
9907             return -EINVAL;
9908         }
9909         host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
9910         ret = get_errno(dup3(arg1, arg2, host_flags));
9911         if (ret >= 0) {
9912             fd_trans_dup(arg1, arg2);
9913         }
9914         return ret;
9915     }
9916 #endif
9917 #ifdef TARGET_NR_getppid /* not on alpha */
9918     case TARGET_NR_getppid:
9919         return get_errno(getppid());
9920 #endif
9921 #ifdef TARGET_NR_getpgrp
9922     case TARGET_NR_getpgrp:
9923         return get_errno(getpgrp());
9924 #endif
9925     case TARGET_NR_setsid:
9926         return get_errno(setsid());
9927 #ifdef TARGET_NR_sigaction
9928     case TARGET_NR_sigaction:
9929         {
9930 #if defined(TARGET_MIPS)
9931 	    struct target_sigaction act, oact, *pact, *old_act;
9932 
9933 	    if (arg2) {
9934                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9935                     return -TARGET_EFAULT;
9936 		act._sa_handler = old_act->_sa_handler;
9937 		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
9938 		act.sa_flags = old_act->sa_flags;
9939 		unlock_user_struct(old_act, arg2, 0);
9940 		pact = &act;
9941 	    } else {
9942 		pact = NULL;
9943 	    }
9944 
9945         ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9946 
9947 	    if (!is_error(ret) && arg3) {
9948                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9949                     return -TARGET_EFAULT;
9950 		old_act->_sa_handler = oact._sa_handler;
9951 		old_act->sa_flags = oact.sa_flags;
9952 		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
9953 		old_act->sa_mask.sig[1] = 0;
9954 		old_act->sa_mask.sig[2] = 0;
9955 		old_act->sa_mask.sig[3] = 0;
9956 		unlock_user_struct(old_act, arg3, 1);
9957 	    }
9958 #else
9959             struct target_old_sigaction *old_act;
9960             struct target_sigaction act, oact, *pact;
9961             if (arg2) {
9962                 if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
9963                     return -TARGET_EFAULT;
9964                 act._sa_handler = old_act->_sa_handler;
9965                 target_siginitset(&act.sa_mask, old_act->sa_mask);
9966                 act.sa_flags = old_act->sa_flags;
9967 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9968                 act.sa_restorer = old_act->sa_restorer;
9969 #endif
9970                 unlock_user_struct(old_act, arg2, 0);
9971                 pact = &act;
9972             } else {
9973                 pact = NULL;
9974             }
9975             ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
9976             if (!is_error(ret) && arg3) {
9977                 if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
9978                     return -TARGET_EFAULT;
9979                 old_act->_sa_handler = oact._sa_handler;
9980                 old_act->sa_mask = oact.sa_mask.sig[0];
9981                 old_act->sa_flags = oact.sa_flags;
9982 #ifdef TARGET_ARCH_HAS_SA_RESTORER
9983                 old_act->sa_restorer = oact.sa_restorer;
9984 #endif
9985                 unlock_user_struct(old_act, arg3, 1);
9986             }
9987 #endif
9988         }
9989         return ret;
9990 #endif
9991     case TARGET_NR_rt_sigaction:
9992         {
9993             /*
9994              * For Alpha and SPARC this is a 5 argument syscall, with
9995              * a 'restorer' parameter which must be copied into the
9996              * sa_restorer field of the sigaction struct.
9997              * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
9998              * and arg5 is the sigsetsize.
9999              */
10000 #if defined(TARGET_ALPHA)
10001             target_ulong sigsetsize = arg4;
10002             target_ulong restorer = arg5;
10003 #elif defined(TARGET_SPARC)
10004             target_ulong restorer = arg4;
10005             target_ulong sigsetsize = arg5;
10006 #else
10007             target_ulong sigsetsize = arg4;
10008             target_ulong restorer = 0;
10009 #endif
10010             struct target_sigaction *act = NULL;
10011             struct target_sigaction *oact = NULL;
10012 
10013             if (sigsetsize != sizeof(target_sigset_t)) {
10014                 return -TARGET_EINVAL;
10015             }
10016             if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
10017                 return -TARGET_EFAULT;
10018             }
10019             if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
10020                 ret = -TARGET_EFAULT;
10021             } else {
10022                 ret = get_errno(do_sigaction(arg1, act, oact, restorer));
10023                 if (oact) {
10024                     unlock_user_struct(oact, arg3, 1);
10025                 }
10026             }
10027             if (act) {
10028                 unlock_user_struct(act, arg2, 0);
10029             }
10030         }
10031         return ret;
10032 #ifdef TARGET_NR_sgetmask /* not on alpha */
10033     case TARGET_NR_sgetmask:
10034         {
10035             sigset_t cur_set;
10036             abi_ulong target_set;
10037             ret = do_sigprocmask(0, NULL, &cur_set);
10038             if (!ret) {
10039                 host_to_target_old_sigset(&target_set, &cur_set);
10040                 ret = target_set;
10041             }
10042         }
10043         return ret;
10044 #endif
10045 #ifdef TARGET_NR_ssetmask /* not on alpha */
10046     case TARGET_NR_ssetmask:
10047         {
10048             sigset_t set, oset;
10049             abi_ulong target_set = arg1;
10050             target_to_host_old_sigset(&set, &target_set);
10051             ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
10052             if (!ret) {
10053                 host_to_target_old_sigset(&target_set, &oset);
10054                 ret = target_set;
10055             }
10056         }
10057         return ret;
10058 #endif
10059 #ifdef TARGET_NR_sigprocmask
10060     case TARGET_NR_sigprocmask:
10061         {
10062 #if defined(TARGET_ALPHA)
10063             sigset_t set, oldset;
10064             abi_ulong mask;
10065             int how;
10066 
10067             switch (arg1) {
10068             case TARGET_SIG_BLOCK:
10069                 how = SIG_BLOCK;
10070                 break;
10071             case TARGET_SIG_UNBLOCK:
10072                 how = SIG_UNBLOCK;
10073                 break;
10074             case TARGET_SIG_SETMASK:
10075                 how = SIG_SETMASK;
10076                 break;
10077             default:
10078                 return -TARGET_EINVAL;
10079             }
10080             mask = arg2;
10081             target_to_host_old_sigset(&set, &mask);
10082 
10083             ret = do_sigprocmask(how, &set, &oldset);
10084             if (!is_error(ret)) {
10085                 host_to_target_old_sigset(&mask, &oldset);
10086                 ret = mask;
10087                 cpu_env->ir[IR_V0] = 0; /* force no error */
10088             }
10089 #else
10090             sigset_t set, oldset, *set_ptr;
10091             int how;
10092 
10093             if (arg2) {
10094                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10095                 if (!p) {
10096                     return -TARGET_EFAULT;
10097                 }
10098                 target_to_host_old_sigset(&set, p);
10099                 unlock_user(p, arg2, 0);
10100                 set_ptr = &set;
10101                 switch (arg1) {
10102                 case TARGET_SIG_BLOCK:
10103                     how = SIG_BLOCK;
10104                     break;
10105                 case TARGET_SIG_UNBLOCK:
10106                     how = SIG_UNBLOCK;
10107                     break;
10108                 case TARGET_SIG_SETMASK:
10109                     how = SIG_SETMASK;
10110                     break;
10111                 default:
10112                     return -TARGET_EINVAL;
10113                 }
10114             } else {
10115                 how = 0;
10116                 set_ptr = NULL;
10117             }
10118             ret = do_sigprocmask(how, set_ptr, &oldset);
10119             if (!is_error(ret) && arg3) {
10120                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10121                     return -TARGET_EFAULT;
10122                 host_to_target_old_sigset(p, &oldset);
10123                 unlock_user(p, arg3, sizeof(target_sigset_t));
10124             }
10125 #endif
10126         }
10127         return ret;
10128 #endif
10129     case TARGET_NR_rt_sigprocmask:
10130         {
10131             int how = arg1;
10132             sigset_t set, oldset, *set_ptr;
10133 
10134             if (arg4 != sizeof(target_sigset_t)) {
10135                 return -TARGET_EINVAL;
10136             }
10137 
10138             if (arg2) {
10139                 p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
10140                 if (!p) {
10141                     return -TARGET_EFAULT;
10142                 }
10143                 target_to_host_sigset(&set, p);
10144                 unlock_user(p, arg2, 0);
10145                 set_ptr = &set;
10146                 switch(how) {
10147                 case TARGET_SIG_BLOCK:
10148                     how = SIG_BLOCK;
10149                     break;
10150                 case TARGET_SIG_UNBLOCK:
10151                     how = SIG_UNBLOCK;
10152                     break;
10153                 case TARGET_SIG_SETMASK:
10154                     how = SIG_SETMASK;
10155                     break;
10156                 default:
10157                     return -TARGET_EINVAL;
10158                 }
10159             } else {
10160                 how = 0;
10161                 set_ptr = NULL;
10162             }
10163             ret = do_sigprocmask(how, set_ptr, &oldset);
10164             if (!is_error(ret) && arg3) {
10165                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
10166                     return -TARGET_EFAULT;
10167                 host_to_target_sigset(p, &oldset);
10168                 unlock_user(p, arg3, sizeof(target_sigset_t));
10169             }
10170         }
10171         return ret;
10172 #ifdef TARGET_NR_sigpending
10173     case TARGET_NR_sigpending:
10174         {
10175             sigset_t set;
10176             ret = get_errno(sigpending(&set));
10177             if (!is_error(ret)) {
10178                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10179                     return -TARGET_EFAULT;
10180                 host_to_target_old_sigset(p, &set);
10181                 unlock_user(p, arg1, sizeof(target_sigset_t));
10182             }
10183         }
10184         return ret;
10185 #endif
10186     case TARGET_NR_rt_sigpending:
10187         {
10188             sigset_t set;
10189 
10190             /* Yes, this check is >, not != like most. We follow the kernel's
10191              * logic and it does it like this because it implements
10192              * NR_sigpending through the same code path, and in that case
10193              * the old_sigset_t is smaller in size.
10194              */
10195             if (arg2 > sizeof(target_sigset_t)) {
10196                 return -TARGET_EINVAL;
10197             }
10198 
10199             ret = get_errno(sigpending(&set));
10200             if (!is_error(ret)) {
10201                 if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
10202                     return -TARGET_EFAULT;
10203                 host_to_target_sigset(p, &set);
10204                 unlock_user(p, arg1, sizeof(target_sigset_t));
10205             }
10206         }
10207         return ret;
10208 #ifdef TARGET_NR_sigsuspend
10209     case TARGET_NR_sigsuspend:
10210         {
10211             sigset_t *set;
10212 
10213 #if defined(TARGET_ALPHA)
10214             TaskState *ts = get_task_state(cpu);
10215             /* target_to_host_old_sigset will bswap back */
10216             abi_ulong mask = tswapal(arg1);
10217             set = &ts->sigsuspend_mask;
10218             target_to_host_old_sigset(set, &mask);
10219 #else
10220             ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
10221             if (ret != 0) {
10222                 return ret;
10223             }
10224 #endif
10225             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10226             finish_sigsuspend_mask(ret);
10227         }
10228         return ret;
10229 #endif
10230     case TARGET_NR_rt_sigsuspend:
10231         {
10232             sigset_t *set;
10233 
10234             ret = process_sigsuspend_mask(&set, arg1, arg2);
10235             if (ret != 0) {
10236                 return ret;
10237             }
10238             ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
10239             finish_sigsuspend_mask(ret);
10240         }
10241         return ret;
10242 #ifdef TARGET_NR_rt_sigtimedwait
10243     case TARGET_NR_rt_sigtimedwait:
10244         {
10245             sigset_t set;
10246             struct timespec uts, *puts;
10247             siginfo_t uinfo;
10248 
10249             if (arg4 != sizeof(target_sigset_t)) {
10250                 return -TARGET_EINVAL;
10251             }
10252 
10253             if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
10254                 return -TARGET_EFAULT;
10255             target_to_host_sigset(&set, p);
10256             unlock_user(p, arg1, 0);
10257             if (arg3) {
10258                 puts = &uts;
10259                 if (target_to_host_timespec(puts, arg3)) {
10260                     return -TARGET_EFAULT;
10261                 }
10262             } else {
10263                 puts = NULL;
10264             }
10265             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10266                                                  SIGSET_T_SIZE));
10267             if (!is_error(ret)) {
10268                 if (arg2) {
10269                     p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
10270                                   0);
10271                     if (!p) {
10272                         return -TARGET_EFAULT;
10273                     }
10274                     host_to_target_siginfo(p, &uinfo);
10275                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10276                 }
10277                 ret = host_to_target_signal(ret);
10278             }
10279         }
10280         return ret;
10281 #endif
10282 #ifdef TARGET_NR_rt_sigtimedwait_time64
10283     case TARGET_NR_rt_sigtimedwait_time64:
10284         {
10285             sigset_t set;
10286             struct timespec uts, *puts;
10287             siginfo_t uinfo;
10288 
10289             if (arg4 != sizeof(target_sigset_t)) {
10290                 return -TARGET_EINVAL;
10291             }
10292 
10293             p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
10294             if (!p) {
10295                 return -TARGET_EFAULT;
10296             }
10297             target_to_host_sigset(&set, p);
10298             unlock_user(p, arg1, 0);
10299             if (arg3) {
10300                 puts = &uts;
10301                 if (target_to_host_timespec64(puts, arg3)) {
10302                     return -TARGET_EFAULT;
10303                 }
10304             } else {
10305                 puts = NULL;
10306             }
10307             ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
10308                                                  SIGSET_T_SIZE));
10309             if (!is_error(ret)) {
10310                 if (arg2) {
10311                     p = lock_user(VERIFY_WRITE, arg2,
10312                                   sizeof(target_siginfo_t), 0);
10313                     if (!p) {
10314                         return -TARGET_EFAULT;
10315                     }
10316                     host_to_target_siginfo(p, &uinfo);
10317                     unlock_user(p, arg2, sizeof(target_siginfo_t));
10318                 }
10319                 ret = host_to_target_signal(ret);
10320             }
10321         }
10322         return ret;
10323 #endif
10324     case TARGET_NR_rt_sigqueueinfo:
10325         {
10326             siginfo_t uinfo;
10327 
10328             p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
10329             if (!p) {
10330                 return -TARGET_EFAULT;
10331             }
10332             target_to_host_siginfo(&uinfo, p);
10333             unlock_user(p, arg3, 0);
10334             ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
10335         }
10336         return ret;
10337     case TARGET_NR_rt_tgsigqueueinfo:
10338         {
10339             siginfo_t uinfo;
10340 
10341             p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
10342             if (!p) {
10343                 return -TARGET_EFAULT;
10344             }
10345             target_to_host_siginfo(&uinfo, p);
10346             unlock_user(p, arg4, 0);
10347             ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
10348         }
10349         return ret;
10350 #ifdef TARGET_NR_sigreturn
10351     case TARGET_NR_sigreturn:
10352         if (block_signals()) {
10353             return -QEMU_ERESTARTSYS;
10354         }
10355         return do_sigreturn(cpu_env);
10356 #endif
10357     case TARGET_NR_rt_sigreturn:
10358         if (block_signals()) {
10359             return -QEMU_ERESTARTSYS;
10360         }
10361         return do_rt_sigreturn(cpu_env);
10362     case TARGET_NR_sethostname:
10363         if (!(p = lock_user_string(arg1)))
10364             return -TARGET_EFAULT;
10365         ret = get_errno(sethostname(p, arg2));
10366         unlock_user(p, arg1, 0);
10367         return ret;
10368 #ifdef TARGET_NR_setrlimit
10369     case TARGET_NR_setrlimit:
10370         {
10371             int resource = target_to_host_resource(arg1);
10372             struct target_rlimit *target_rlim;
10373             struct rlimit rlim;
10374             if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
10375                 return -TARGET_EFAULT;
10376             rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
10377             rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
10378             unlock_user_struct(target_rlim, arg2, 0);
10379             /*
10380              * If we just passed through resource limit settings for memory then
10381              * they would also apply to QEMU's own allocations, and QEMU will
10382              * crash or hang or die if its allocations fail. Ideally we would
10383              * track the guest allocations in QEMU and apply the limits ourselves.
10384              * For now, just tell the guest the call succeeded but don't actually
10385              * limit anything.
10386              */
10387             if (resource != RLIMIT_AS &&
10388                 resource != RLIMIT_DATA &&
10389                 resource != RLIMIT_STACK) {
10390                 return get_errno(setrlimit(resource, &rlim));
10391             } else {
10392                 return 0;
10393             }
10394         }
10395 #endif
10396 #ifdef TARGET_NR_getrlimit
10397     case TARGET_NR_getrlimit:
10398         {
10399             int resource = target_to_host_resource(arg1);
10400             struct target_rlimit *target_rlim;
10401             struct rlimit rlim;
10402 
10403             ret = get_errno(getrlimit(resource, &rlim));
10404             if (!is_error(ret)) {
10405                 if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
10406                     return -TARGET_EFAULT;
10407                 target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
10408                 target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
10409                 unlock_user_struct(target_rlim, arg2, 1);
10410             }
10411         }
10412         return ret;
10413 #endif
10414     case TARGET_NR_getrusage:
10415         {
10416             struct rusage rusage;
10417             ret = get_errno(getrusage(arg1, &rusage));
10418             if (!is_error(ret)) {
10419                 ret = host_to_target_rusage(arg2, &rusage);
10420             }
10421         }
10422         return ret;
10423 #if defined(TARGET_NR_gettimeofday)
10424     case TARGET_NR_gettimeofday:
10425         {
10426             struct timeval tv;
10427             struct timezone tz;
10428 
10429             ret = get_errno(gettimeofday(&tv, &tz));
10430             if (!is_error(ret)) {
10431                 if (arg1 && copy_to_user_timeval(arg1, &tv)) {
10432                     return -TARGET_EFAULT;
10433                 }
10434                 if (arg2 && copy_to_user_timezone(arg2, &tz)) {
10435                     return -TARGET_EFAULT;
10436                 }
10437             }
10438         }
10439         return ret;
10440 #endif
10441 #if defined(TARGET_NR_settimeofday)
10442     case TARGET_NR_settimeofday:
10443         {
10444             struct timeval tv, *ptv = NULL;
10445             struct timezone tz, *ptz = NULL;
10446 
10447             if (arg1) {
10448                 if (copy_from_user_timeval(&tv, arg1)) {
10449                     return -TARGET_EFAULT;
10450                 }
10451                 ptv = &tv;
10452             }
10453 
10454             if (arg2) {
10455                 if (copy_from_user_timezone(&tz, arg2)) {
10456                     return -TARGET_EFAULT;
10457                 }
10458                 ptz = &tz;
10459             }
10460 
10461             return get_errno(settimeofday(ptv, ptz));
10462         }
10463 #endif
10464 #if defined(TARGET_NR_select)
10465     case TARGET_NR_select:
10466 #if defined(TARGET_WANT_NI_OLD_SELECT)
10467         /* some architectures used to have old_select here
10468          * but now ENOSYS it.
10469          */
10470         ret = -TARGET_ENOSYS;
10471 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10472         ret = do_old_select(arg1);
10473 #else
10474         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10475 #endif
10476         return ret;
10477 #endif
10478 #ifdef TARGET_NR_pselect6
10479     case TARGET_NR_pselect6:
10480         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10481 #endif
10482 #ifdef TARGET_NR_pselect6_time64
10483     case TARGET_NR_pselect6_time64:
10484         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10485 #endif
10486 #ifdef TARGET_NR_symlink
10487     case TARGET_NR_symlink:
10488         {
10489             void *p2;
10490             p = lock_user_string(arg1);
10491             p2 = lock_user_string(arg2);
10492             if (!p || !p2)
10493                 ret = -TARGET_EFAULT;
10494             else
10495                 ret = get_errno(symlink(p, p2));
10496             unlock_user(p2, arg2, 0);
10497             unlock_user(p, arg1, 0);
10498         }
10499         return ret;
10500 #endif
10501 #if defined(TARGET_NR_symlinkat)
10502     case TARGET_NR_symlinkat:
10503         {
10504             void *p2;
10505             p  = lock_user_string(arg1);
10506             p2 = lock_user_string(arg3);
10507             if (!p || !p2)
10508                 ret = -TARGET_EFAULT;
10509             else
10510                 ret = get_errno(symlinkat(p, arg2, p2));
10511             unlock_user(p2, arg3, 0);
10512             unlock_user(p, arg1, 0);
10513         }
10514         return ret;
10515 #endif
10516 #ifdef TARGET_NR_readlink
10517     case TARGET_NR_readlink:
10518         {
10519             void *p2;
10520             p = lock_user_string(arg1);
10521             p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10522             ret = get_errno(do_guest_readlink(p, p2, arg3));
10523             unlock_user(p2, arg2, ret);
10524             unlock_user(p, arg1, 0);
10525         }
10526         return ret;
10527 #endif
10528 #if defined(TARGET_NR_readlinkat)
10529     case TARGET_NR_readlinkat:
10530         {
10531             void *p2;
10532             p  = lock_user_string(arg2);
10533             p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
10534             if (!p || !p2) {
10535                 ret = -TARGET_EFAULT;
10536             } else if (!arg4) {
10537                 /* Short circuit this for the magic exe check. */
10538                 ret = -TARGET_EINVAL;
10539             } else if (is_proc_myself((const char *)p, "exe")) {
10540                 /*
10541                  * Don't worry about sign mismatch as earlier mapping
10542                  * logic would have thrown a bad address error.
10543                  */
10544                 ret = MIN(strlen(exec_path), arg4);
10545                 /* We cannot NUL terminate the string. */
10546                 memcpy(p2, exec_path, ret);
10547             } else {
10548                 ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
10549             }
10550             unlock_user(p2, arg3, ret);
10551             unlock_user(p, arg2, 0);
10552         }
10553         return ret;
10554 #endif
10555 #ifdef TARGET_NR_swapon
10556     case TARGET_NR_swapon:
10557         if (!(p = lock_user_string(arg1)))
10558             return -TARGET_EFAULT;
10559         ret = get_errno(swapon(p, arg2));
10560         unlock_user(p, arg1, 0);
10561         return ret;
10562 #endif
10563     case TARGET_NR_reboot:
10564         if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
10565            /* arg4 must be ignored in all other cases */
10566            p = lock_user_string(arg4);
10567            if (!p) {
10568                return -TARGET_EFAULT;
10569            }
10570            ret = get_errno(reboot(arg1, arg2, arg3, p));
10571            unlock_user(p, arg4, 0);
10572         } else {
10573            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
10574         }
10575         return ret;
10576 #ifdef TARGET_NR_mmap
10577     case TARGET_NR_mmap:
10578 #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
10579     (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
10580     defined(TARGET_M68K) || defined(TARGET_MICROBLAZE) \
10581     || defined(TARGET_S390X)
10582         {
10583             abi_ulong *v;
10584             abi_ulong v1, v2, v3, v4, v5, v6;
10585             if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
10586                 return -TARGET_EFAULT;
10587             v1 = tswapal(v[0]);
10588             v2 = tswapal(v[1]);
10589             v3 = tswapal(v[2]);
10590             v4 = tswapal(v[3]);
10591             v5 = tswapal(v[4]);
10592             v6 = tswapal(v[5]);
10593             unlock_user(v, arg1, 0);
10594             return do_mmap(v1, v2, v3, v4, v5, v6);
10595         }
10596 #else
10597         /* mmap pointers are always untagged */
10598         return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
10599 #endif
10600 #endif
10601 #ifdef TARGET_NR_mmap2
10602     case TARGET_NR_mmap2:
10603 #ifndef MMAP_SHIFT
10604 #define MMAP_SHIFT 12
10605 #endif
10606         return do_mmap(arg1, arg2, arg3, arg4, arg5,
10607                        (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
10608 #endif
10609     case TARGET_NR_munmap:
10610         arg1 = cpu_untagged_addr(cpu, arg1);
10611         return get_errno(target_munmap(arg1, arg2));
10612     case TARGET_NR_mprotect:
10613         arg1 = cpu_untagged_addr(cpu, arg1);
10614         {
10615             TaskState *ts = get_task_state(cpu);
10616             /* Special hack to detect libc making the stack executable.  */
10617             if ((arg3 & PROT_GROWSDOWN)
10618                 && arg1 >= ts->info->stack_limit
10619                 && arg1 <= ts->info->start_stack) {
10620                 arg3 &= ~PROT_GROWSDOWN;
10621                 arg2 = arg2 + arg1 - ts->info->stack_limit;
10622                 arg1 = ts->info->stack_limit;
10623             }
10624         }
10625         return get_errno(target_mprotect(arg1, arg2, arg3));
10626 #ifdef TARGET_NR_mremap
10627     case TARGET_NR_mremap:
10628         arg1 = cpu_untagged_addr(cpu, arg1);
10629         /* mremap new_addr (arg5) is always untagged */
10630         return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
10631 #endif
10632         /* ??? msync/mlock/munlock are broken for softmmu.  */
10633 #ifdef TARGET_NR_msync
10634     case TARGET_NR_msync:
10635         return get_errno(msync(g2h(cpu, arg1), arg2,
10636                                target_to_host_msync_arg(arg3)));
10637 #endif
10638 #ifdef TARGET_NR_mlock
10639     case TARGET_NR_mlock:
10640         return get_errno(mlock(g2h(cpu, arg1), arg2));
10641 #endif
10642 #ifdef TARGET_NR_munlock
10643     case TARGET_NR_munlock:
10644         return get_errno(munlock(g2h(cpu, arg1), arg2));
10645 #endif
10646 #ifdef TARGET_NR_mlockall
10647     case TARGET_NR_mlockall:
10648         return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
10649 #endif
10650 #ifdef TARGET_NR_munlockall
10651     case TARGET_NR_munlockall:
10652         return get_errno(munlockall());
10653 #endif
10654 #ifdef TARGET_NR_truncate
10655     case TARGET_NR_truncate:
10656         if (!(p = lock_user_string(arg1)))
10657             return -TARGET_EFAULT;
10658         ret = get_errno(truncate(p, arg2));
10659         unlock_user(p, arg1, 0);
10660         return ret;
10661 #endif
10662 #ifdef TARGET_NR_ftruncate
10663     case TARGET_NR_ftruncate:
10664         return get_errno(ftruncate(arg1, arg2));
10665 #endif
10666     case TARGET_NR_fchmod:
10667         return get_errno(fchmod(arg1, arg2));
10668 #if defined(TARGET_NR_fchmodat)
10669     case TARGET_NR_fchmodat:
10670         if (!(p = lock_user_string(arg2)))
10671             return -TARGET_EFAULT;
10672         ret = get_errno(fchmodat(arg1, p, arg3, 0));
10673         unlock_user(p, arg2, 0);
10674         return ret;
10675 #endif
10676     case TARGET_NR_getpriority:
10677         /* Note that negative values are valid for getpriority, so we must
10678            differentiate based on errno settings.  */
10679         errno = 0;
10680         ret = getpriority(arg1, arg2);
10681         if (ret == -1 && errno != 0) {
10682             return -host_to_target_errno(errno);
10683         }
10684 #ifdef TARGET_ALPHA
10685         /* Return value is the unbiased priority.  Signal no error.  */
10686         cpu_env->ir[IR_V0] = 0;
10687 #else
10688         /* Return value is a biased priority to avoid negative numbers.  */
10689         ret = 20 - ret;
10690 #endif
10691         return ret;
10692     case TARGET_NR_setpriority:
10693         return get_errno(setpriority(arg1, arg2, arg3));
10694 #ifdef TARGET_NR_statfs
10695     case TARGET_NR_statfs:
10696         if (!(p = lock_user_string(arg1))) {
10697             return -TARGET_EFAULT;
10698         }
10699         ret = get_errno(statfs(path(p), &stfs));
10700         unlock_user(p, arg1, 0);
10701     convert_statfs:
10702         if (!is_error(ret)) {
10703             struct target_statfs *target_stfs;
10704 
10705             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
10706                 return -TARGET_EFAULT;
10707             __put_user(stfs.f_type, &target_stfs->f_type);
10708             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10709             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10710             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10711             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10712             __put_user(stfs.f_files, &target_stfs->f_files);
10713             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10714             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10715             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10716             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10717             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10718 #ifdef _STATFS_F_FLAGS
10719             __put_user(stfs.f_flags, &target_stfs->f_flags);
10720 #else
10721             __put_user(0, &target_stfs->f_flags);
10722 #endif
10723             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10724             unlock_user_struct(target_stfs, arg2, 1);
10725         }
10726         return ret;
10727 #endif
10728 #ifdef TARGET_NR_fstatfs
10729     case TARGET_NR_fstatfs:
10730         ret = get_errno(fstatfs(arg1, &stfs));
10731         goto convert_statfs;
10732 #endif
10733 #ifdef TARGET_NR_statfs64
10734     case TARGET_NR_statfs64:
10735         if (!(p = lock_user_string(arg1))) {
10736             return -TARGET_EFAULT;
10737         }
10738         ret = get_errno(statfs(path(p), &stfs));
10739         unlock_user(p, arg1, 0);
10740     convert_statfs64:
10741         if (!is_error(ret)) {
10742             struct target_statfs64 *target_stfs;
10743 
10744             if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
10745                 return -TARGET_EFAULT;
10746             __put_user(stfs.f_type, &target_stfs->f_type);
10747             __put_user(stfs.f_bsize, &target_stfs->f_bsize);
10748             __put_user(stfs.f_blocks, &target_stfs->f_blocks);
10749             __put_user(stfs.f_bfree, &target_stfs->f_bfree);
10750             __put_user(stfs.f_bavail, &target_stfs->f_bavail);
10751             __put_user(stfs.f_files, &target_stfs->f_files);
10752             __put_user(stfs.f_ffree, &target_stfs->f_ffree);
10753             __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
10754             __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
10755             __put_user(stfs.f_namelen, &target_stfs->f_namelen);
10756             __put_user(stfs.f_frsize, &target_stfs->f_frsize);
10757 #ifdef _STATFS_F_FLAGS
10758             __put_user(stfs.f_flags, &target_stfs->f_flags);
10759 #else
10760             __put_user(0, &target_stfs->f_flags);
10761 #endif
10762             memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
10763             unlock_user_struct(target_stfs, arg3, 1);
10764         }
10765         return ret;
10766     case TARGET_NR_fstatfs64:
10767         ret = get_errno(fstatfs(arg1, &stfs));
10768         goto convert_statfs64;
10769 #endif
10770 #ifdef TARGET_NR_socketcall
10771     case TARGET_NR_socketcall:
10772         return do_socketcall(arg1, arg2);
10773 #endif
10774 #ifdef TARGET_NR_accept
10775     case TARGET_NR_accept:
10776         return do_accept4(arg1, arg2, arg3, 0);
10777 #endif
10778 #ifdef TARGET_NR_accept4
10779     case TARGET_NR_accept4:
10780         return do_accept4(arg1, arg2, arg3, arg4);
10781 #endif
10782 #ifdef TARGET_NR_bind
10783     case TARGET_NR_bind:
10784         return do_bind(arg1, arg2, arg3);
10785 #endif
10786 #ifdef TARGET_NR_connect
10787     case TARGET_NR_connect:
10788         return do_connect(arg1, arg2, arg3);
10789 #endif
10790 #ifdef TARGET_NR_getpeername
10791     case TARGET_NR_getpeername:
10792         return do_getpeername(arg1, arg2, arg3);
10793 #endif
10794 #ifdef TARGET_NR_getsockname
10795     case TARGET_NR_getsockname:
10796         return do_getsockname(arg1, arg2, arg3);
10797 #endif
10798 #ifdef TARGET_NR_getsockopt
10799     case TARGET_NR_getsockopt:
10800         return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
10801 #endif
10802 #ifdef TARGET_NR_listen
10803     case TARGET_NR_listen:
10804         return get_errno(listen(arg1, arg2));
10805 #endif
10806 #ifdef TARGET_NR_recv
10807     case TARGET_NR_recv:
10808         return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
10809 #endif
10810 #ifdef TARGET_NR_recvfrom
10811     case TARGET_NR_recvfrom:
10812         return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
10813 #endif
10814 #ifdef TARGET_NR_recvmsg
10815     case TARGET_NR_recvmsg:
10816         return do_sendrecvmsg(arg1, arg2, arg3, 0);
10817 #endif
10818 #ifdef TARGET_NR_send
10819     case TARGET_NR_send:
10820         return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
10821 #endif
10822 #ifdef TARGET_NR_sendmsg
10823     case TARGET_NR_sendmsg:
10824         return do_sendrecvmsg(arg1, arg2, arg3, 1);
10825 #endif
10826 #ifdef TARGET_NR_sendmmsg
10827     case TARGET_NR_sendmmsg:
10828         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
10829 #endif
10830 #ifdef TARGET_NR_recvmmsg
10831     case TARGET_NR_recvmmsg:
10832         return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
10833 #endif
10834 #ifdef TARGET_NR_sendto
10835     case TARGET_NR_sendto:
10836         return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
10837 #endif
10838 #ifdef TARGET_NR_shutdown
10839     case TARGET_NR_shutdown:
10840         return get_errno(shutdown(arg1, arg2));
10841 #endif
10842 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
10843     case TARGET_NR_getrandom:
10844         p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
10845         if (!p) {
10846             return -TARGET_EFAULT;
10847         }
10848         ret = get_errno(getrandom(p, arg2, arg3));
10849         unlock_user(p, arg1, ret);
10850         return ret;
10851 #endif
10852 #ifdef TARGET_NR_socket
10853     case TARGET_NR_socket:
10854         return do_socket(arg1, arg2, arg3);
10855 #endif
10856 #ifdef TARGET_NR_socketpair
10857     case TARGET_NR_socketpair:
10858         return do_socketpair(arg1, arg2, arg3, arg4);
10859 #endif
10860 #ifdef TARGET_NR_setsockopt
10861     case TARGET_NR_setsockopt:
10862         return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
10863 #endif
10864 #if defined(TARGET_NR_syslog)
10865     case TARGET_NR_syslog:
10866         {
10867             int len = arg2;
10868 
10869             switch (arg1) {
10870             case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
10871             case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
10872             case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
10873             case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
10874             case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
10875             case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
10876             case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
10877             case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
10878                 return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
10879             case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
10880             case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
10881             case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
10882                 {
10883                     if (len < 0) {
10884                         return -TARGET_EINVAL;
10885                     }
10886                     if (len == 0) {
10887                         return 0;
10888                     }
10889                     p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
10890                     if (!p) {
10891                         return -TARGET_EFAULT;
10892                     }
10893                     ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
10894                     unlock_user(p, arg2, arg3);
10895                 }
10896                 return ret;
10897             default:
10898                 return -TARGET_EINVAL;
10899             }
10900         }
10901         break;
10902 #endif
10903     case TARGET_NR_setitimer:
10904         {
10905             struct itimerval value, ovalue, *pvalue;
10906 
10907             if (arg2) {
10908                 pvalue = &value;
10909                 if (copy_from_user_timeval(&pvalue->it_interval, arg2)
10910                     || copy_from_user_timeval(&pvalue->it_value,
10911                                               arg2 + sizeof(struct target_timeval)))
10912                     return -TARGET_EFAULT;
10913             } else {
10914                 pvalue = NULL;
10915             }
10916             ret = get_errno(setitimer(arg1, pvalue, &ovalue));
10917             if (!is_error(ret) && arg3) {
10918                 if (copy_to_user_timeval(arg3,
10919                                          &ovalue.it_interval)
10920                     || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
10921                                             &ovalue.it_value))
10922                     return -TARGET_EFAULT;
10923             }
10924         }
10925         return ret;
10926     case TARGET_NR_getitimer:
10927         {
10928             struct itimerval value;
10929 
10930             ret = get_errno(getitimer(arg1, &value));
10931             if (!is_error(ret) && arg2) {
10932                 if (copy_to_user_timeval(arg2,
10933                                          &value.it_interval)
10934                     || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
10935                                             &value.it_value))
10936                     return -TARGET_EFAULT;
10937             }
10938         }
10939         return ret;
10940 #ifdef TARGET_NR_stat
10941     case TARGET_NR_stat:
10942         if (!(p = lock_user_string(arg1))) {
10943             return -TARGET_EFAULT;
10944         }
10945         ret = get_errno(stat(path(p), &st));
10946         unlock_user(p, arg1, 0);
10947         goto do_stat;
10948 #endif
10949 #ifdef TARGET_NR_lstat
10950     case TARGET_NR_lstat:
10951         if (!(p = lock_user_string(arg1))) {
10952             return -TARGET_EFAULT;
10953         }
10954         ret = get_errno(lstat(path(p), &st));
10955         unlock_user(p, arg1, 0);
10956         goto do_stat;
10957 #endif
10958 #ifdef TARGET_NR_fstat
10959     case TARGET_NR_fstat:
10960         {
10961             ret = get_errno(fstat(arg1, &st));
10962 #if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
10963         do_stat:
10964 #endif
10965             if (!is_error(ret)) {
10966                 struct target_stat *target_st;
10967 
10968                 if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
10969                     return -TARGET_EFAULT;
10970                 memset(target_st, 0, sizeof(*target_st));
10971                 __put_user(st.st_dev, &target_st->st_dev);
10972                 __put_user(st.st_ino, &target_st->st_ino);
10973                 __put_user(st.st_mode, &target_st->st_mode);
10974                 __put_user(st.st_uid, &target_st->st_uid);
10975                 __put_user(st.st_gid, &target_st->st_gid);
10976                 __put_user(st.st_nlink, &target_st->st_nlink);
10977                 __put_user(st.st_rdev, &target_st->st_rdev);
10978                 __put_user(st.st_size, &target_st->st_size);
10979                 __put_user(st.st_blksize, &target_st->st_blksize);
10980                 __put_user(st.st_blocks, &target_st->st_blocks);
10981                 __put_user(st.st_atime, &target_st->target_st_atime);
10982                 __put_user(st.st_mtime, &target_st->target_st_mtime);
10983                 __put_user(st.st_ctime, &target_st->target_st_ctime);
10984 #if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
10985                 __put_user(st.st_atim.tv_nsec,
10986                            &target_st->target_st_atime_nsec);
10987                 __put_user(st.st_mtim.tv_nsec,
10988                            &target_st->target_st_mtime_nsec);
10989                 __put_user(st.st_ctim.tv_nsec,
10990                            &target_st->target_st_ctime_nsec);
10991 #endif
10992                 unlock_user_struct(target_st, arg2, 1);
10993             }
10994         }
10995         return ret;
10996 #endif
10997     case TARGET_NR_vhangup:
10998         return get_errno(vhangup());
10999 #ifdef TARGET_NR_syscall
11000     case TARGET_NR_syscall:
11001         return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
11002                           arg6, arg7, arg8, 0);
11003 #endif
11004 #if defined(TARGET_NR_wait4)
11005     case TARGET_NR_wait4:
11006         {
11007             int status;
11008             abi_long status_ptr = arg2;
11009             struct rusage rusage, *rusage_ptr;
11010             abi_ulong target_rusage = arg4;
11011             abi_long rusage_err;
11012             if (target_rusage)
11013                 rusage_ptr = &rusage;
11014             else
11015                 rusage_ptr = NULL;
11016             ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
11017             if (!is_error(ret)) {
11018                 if (status_ptr && ret) {
11019                     status = host_to_target_waitstatus(status);
11020                     if (put_user_s32(status, status_ptr))
11021                         return -TARGET_EFAULT;
11022                 }
11023                 if (target_rusage) {
11024                     rusage_err = host_to_target_rusage(target_rusage, &rusage);
11025                     if (rusage_err) {
11026                         ret = rusage_err;
11027                     }
11028                 }
11029             }
11030         }
11031         return ret;
11032 #endif
11033 #ifdef TARGET_NR_swapoff
11034     case TARGET_NR_swapoff:
11035         if (!(p = lock_user_string(arg1)))
11036             return -TARGET_EFAULT;
11037         ret = get_errno(swapoff(p));
11038         unlock_user(p, arg1, 0);
11039         return ret;
11040 #endif
11041     case TARGET_NR_sysinfo:
11042         {
11043             struct target_sysinfo *target_value;
11044             struct sysinfo value;
11045             ret = get_errno(sysinfo(&value));
11046             if (!is_error(ret) && arg1)
11047             {
11048                 if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
11049                     return -TARGET_EFAULT;
11050                 __put_user(value.uptime, &target_value->uptime);
11051                 __put_user(value.loads[0], &target_value->loads[0]);
11052                 __put_user(value.loads[1], &target_value->loads[1]);
11053                 __put_user(value.loads[2], &target_value->loads[2]);
11054                 __put_user(value.totalram, &target_value->totalram);
11055                 __put_user(value.freeram, &target_value->freeram);
11056                 __put_user(value.sharedram, &target_value->sharedram);
11057                 __put_user(value.bufferram, &target_value->bufferram);
11058                 __put_user(value.totalswap, &target_value->totalswap);
11059                 __put_user(value.freeswap, &target_value->freeswap);
11060                 __put_user(value.procs, &target_value->procs);
11061                 __put_user(value.totalhigh, &target_value->totalhigh);
11062                 __put_user(value.freehigh, &target_value->freehigh);
11063                 __put_user(value.mem_unit, &target_value->mem_unit);
11064                 unlock_user_struct(target_value, arg1, 1);
11065             }
11066         }
11067         return ret;
11068 #ifdef TARGET_NR_ipc
11069     case TARGET_NR_ipc:
11070         return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
11071 #endif
11072 #ifdef TARGET_NR_semget
11073     case TARGET_NR_semget:
11074         return get_errno(semget(arg1, arg2, arg3));
11075 #endif
11076 #ifdef TARGET_NR_semop
11077     case TARGET_NR_semop:
11078         return do_semtimedop(arg1, arg2, arg3, 0, false);
11079 #endif
11080 #ifdef TARGET_NR_semtimedop
11081     case TARGET_NR_semtimedop:
11082         return do_semtimedop(arg1, arg2, arg3, arg4, false);
11083 #endif
11084 #ifdef TARGET_NR_semtimedop_time64
11085     case TARGET_NR_semtimedop_time64:
11086         return do_semtimedop(arg1, arg2, arg3, arg4, true);
11087 #endif
11088 #ifdef TARGET_NR_semctl
11089     case TARGET_NR_semctl:
11090         return do_semctl(arg1, arg2, arg3, arg4);
11091 #endif
11092 #ifdef TARGET_NR_msgctl
11093     case TARGET_NR_msgctl:
11094         return do_msgctl(arg1, arg2, arg3);
11095 #endif
11096 #ifdef TARGET_NR_msgget
11097     case TARGET_NR_msgget:
11098         return get_errno(msgget(arg1, arg2));
11099 #endif
11100 #ifdef TARGET_NR_msgrcv
11101     case TARGET_NR_msgrcv:
11102         return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
11103 #endif
11104 #ifdef TARGET_NR_msgsnd
11105     case TARGET_NR_msgsnd:
11106         return do_msgsnd(arg1, arg2, arg3, arg4);
11107 #endif
11108 #ifdef TARGET_NR_shmget
11109     case TARGET_NR_shmget:
11110         return get_errno(shmget(arg1, arg2, arg3));
11111 #endif
11112 #ifdef TARGET_NR_shmctl
11113     case TARGET_NR_shmctl:
11114         return do_shmctl(arg1, arg2, arg3);
11115 #endif
11116 #ifdef TARGET_NR_shmat
11117     case TARGET_NR_shmat:
11118         return target_shmat(cpu_env, arg1, arg2, arg3);
11119 #endif
11120 #ifdef TARGET_NR_shmdt
11121     case TARGET_NR_shmdt:
11122         return target_shmdt(arg1);
11123 #endif
11124     case TARGET_NR_fsync:
11125         return get_errno(fsync(arg1));
11126     case TARGET_NR_clone:
11127         /* Linux manages to have three different orderings for its
11128          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11129          * match the kernel's CONFIG_CLONE_* settings.
11130          * Microblaze is further special in that it uses a sixth
11131          * implicit argument to clone for the TLS pointer.
11132          */
11133 #if defined(TARGET_MICROBLAZE)
11134         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11135 #elif defined(TARGET_CLONE_BACKWARDS)
11136         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11137 #elif defined(TARGET_CLONE_BACKWARDS2)
11138         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11139 #else
11140         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11141 #endif
11142         return ret;
11143 #ifdef __NR_exit_group
11144         /* new thread calls */
11145     case TARGET_NR_exit_group:
11146         preexit_cleanup(cpu_env, arg1);
11147         return get_errno(exit_group(arg1));
11148 #endif
11149     case TARGET_NR_setdomainname:
11150         if (!(p = lock_user_string(arg1)))
11151             return -TARGET_EFAULT;
11152         ret = get_errno(setdomainname(p, arg2));
11153         unlock_user(p, arg1, 0);
11154         return ret;
11155     case TARGET_NR_uname:
11156         /* no need to transcode because we use the linux syscall */
11157         {
11158             struct new_utsname * buf;
11159 
11160             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11161                 return -TARGET_EFAULT;
11162             ret = get_errno(sys_uname(buf));
11163             if (!is_error(ret)) {
11164                 /* Overwrite the native machine name with whatever is being
11165                    emulated. */
11166                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11167                           sizeof(buf->machine));
11168                 /* Allow the user to override the reported release.  */
11169                 if (qemu_uname_release && *qemu_uname_release) {
11170                     g_strlcpy(buf->release, qemu_uname_release,
11171                               sizeof(buf->release));
11172                 }
11173             }
11174             unlock_user_struct(buf, arg1, 1);
11175         }
11176         return ret;
11177 #ifdef TARGET_I386
11178     case TARGET_NR_modify_ldt:
11179         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11180 #if !defined(TARGET_X86_64)
11181     case TARGET_NR_vm86:
11182         return do_vm86(cpu_env, arg1, arg2);
11183 #endif
11184 #endif
11185 #if defined(TARGET_NR_adjtimex)
11186     case TARGET_NR_adjtimex:
11187         {
11188             struct timex host_buf;
11189 
11190             if (target_to_host_timex(&host_buf, arg1) != 0) {
11191                 return -TARGET_EFAULT;
11192             }
11193             ret = get_errno(adjtimex(&host_buf));
11194             if (!is_error(ret)) {
11195                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11196                     return -TARGET_EFAULT;
11197                 }
11198             }
11199         }
11200         return ret;
11201 #endif
11202 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11203     case TARGET_NR_clock_adjtime:
11204         {
11205             struct timex htx;
11206 
11207             if (target_to_host_timex(&htx, arg2) != 0) {
11208                 return -TARGET_EFAULT;
11209             }
11210             ret = get_errno(clock_adjtime(arg1, &htx));
11211             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11212                 return -TARGET_EFAULT;
11213             }
11214         }
11215         return ret;
11216 #endif
11217 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11218     case TARGET_NR_clock_adjtime64:
11219         {
11220             struct timex htx;
11221 
11222             if (target_to_host_timex64(&htx, arg2) != 0) {
11223                 return -TARGET_EFAULT;
11224             }
11225             ret = get_errno(clock_adjtime(arg1, &htx));
11226             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11227                     return -TARGET_EFAULT;
11228             }
11229         }
11230         return ret;
11231 #endif
11232     case TARGET_NR_getpgid:
11233         return get_errno(getpgid(arg1));
11234     case TARGET_NR_fchdir:
11235         return get_errno(fchdir(arg1));
11236     case TARGET_NR_personality:
11237         return get_errno(personality(arg1));
11238 #ifdef TARGET_NR__llseek /* Not on alpha */
11239     case TARGET_NR__llseek:
11240         {
11241             int64_t res;
11242 #if !defined(__NR_llseek)
11243             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11244             if (res == -1) {
11245                 ret = get_errno(res);
11246             } else {
11247                 ret = 0;
11248             }
11249 #else
11250             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11251 #endif
11252             if ((ret == 0) && put_user_s64(res, arg4)) {
11253                 return -TARGET_EFAULT;
11254             }
11255         }
11256         return ret;
11257 #endif
11258 #ifdef TARGET_NR_getdents
11259     case TARGET_NR_getdents:
11260         return do_getdents(arg1, arg2, arg3);
11261 #endif /* TARGET_NR_getdents */
11262 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11263     case TARGET_NR_getdents64:
11264         return do_getdents64(arg1, arg2, arg3);
11265 #endif /* TARGET_NR_getdents64 */
11266 #if defined(TARGET_NR__newselect)
11267     case TARGET_NR__newselect:
11268         return do_select(arg1, arg2, arg3, arg4, arg5);
11269 #endif
11270 #ifdef TARGET_NR_poll
11271     case TARGET_NR_poll:
11272         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11273 #endif
11274 #ifdef TARGET_NR_ppoll
11275     case TARGET_NR_ppoll:
11276         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11277 #endif
11278 #ifdef TARGET_NR_ppoll_time64
11279     case TARGET_NR_ppoll_time64:
11280         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11281 #endif
11282     case TARGET_NR_flock:
11283         /* NOTE: the flock constant seems to be the same for every
11284            Linux platform */
11285         return get_errno(safe_flock(arg1, arg2));
11286     case TARGET_NR_readv:
11287         {
11288             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11289             if (vec != NULL) {
11290                 ret = get_errno(safe_readv(arg1, vec, arg3));
11291                 unlock_iovec(vec, arg2, arg3, 1);
11292             } else {
11293                 ret = -host_to_target_errno(errno);
11294             }
11295         }
11296         return ret;
11297     case TARGET_NR_writev:
11298         {
11299             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11300             if (vec != NULL) {
11301                 ret = get_errno(safe_writev(arg1, vec, arg3));
11302                 unlock_iovec(vec, arg2, arg3, 0);
11303             } else {
11304                 ret = -host_to_target_errno(errno);
11305             }
11306         }
11307         return ret;
11308 #if defined(TARGET_NR_preadv)
11309     case TARGET_NR_preadv:
11310         {
11311             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11312             if (vec != NULL) {
11313                 unsigned long low, high;
11314 
11315                 target_to_host_low_high(arg4, arg5, &low, &high);
11316                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11317                 unlock_iovec(vec, arg2, arg3, 1);
11318             } else {
11319                 ret = -host_to_target_errno(errno);
11320            }
11321         }
11322         return ret;
11323 #endif
11324 #if defined(TARGET_NR_pwritev)
11325     case TARGET_NR_pwritev:
11326         {
11327             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11328             if (vec != NULL) {
11329                 unsigned long low, high;
11330 
11331                 target_to_host_low_high(arg4, arg5, &low, &high);
11332                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11333                 unlock_iovec(vec, arg2, arg3, 0);
11334             } else {
11335                 ret = -host_to_target_errno(errno);
11336            }
11337         }
11338         return ret;
11339 #endif
11340     case TARGET_NR_getsid:
11341         return get_errno(getsid(arg1));
11342 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11343     case TARGET_NR_fdatasync:
11344         return get_errno(fdatasync(arg1));
11345 #endif
11346     case TARGET_NR_sched_getaffinity:
11347         {
11348             unsigned int mask_size;
11349             unsigned long *mask;
11350 
11351             /*
11352              * sched_getaffinity needs multiples of ulong, so need to take
11353              * care of mismatches between target ulong and host ulong sizes.
11354              */
11355             if (arg2 & (sizeof(abi_ulong) - 1)) {
11356                 return -TARGET_EINVAL;
11357             }
11358             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11359 
11360             mask = alloca(mask_size);
11361             memset(mask, 0, mask_size);
11362             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11363 
11364             if (!is_error(ret)) {
11365                 if (ret > arg2) {
11366                     /* More data returned than the caller's buffer will fit.
11367                      * This only happens if sizeof(abi_long) < sizeof(long)
11368                      * and the caller passed us a buffer holding an odd number
11369                      * of abi_longs. If the host kernel is actually using the
11370                      * extra 4 bytes then fail EINVAL; otherwise we can just
11371                      * ignore them and only copy the interesting part.
11372                      */
11373                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11374                     if (numcpus > arg2 * 8) {
11375                         return -TARGET_EINVAL;
11376                     }
11377                     ret = arg2;
11378                 }
11379 
11380                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11381                     return -TARGET_EFAULT;
11382                 }
11383             }
11384         }
11385         return ret;
11386     case TARGET_NR_sched_setaffinity:
11387         {
11388             unsigned int mask_size;
11389             unsigned long *mask;
11390 
11391             /*
11392              * sched_setaffinity needs multiples of ulong, so need to take
11393              * care of mismatches between target ulong and host ulong sizes.
11394              */
11395             if (arg2 & (sizeof(abi_ulong) - 1)) {
11396                 return -TARGET_EINVAL;
11397             }
11398             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11399             mask = alloca(mask_size);
11400 
11401             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11402             if (ret) {
11403                 return ret;
11404             }
11405 
11406             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11407         }
11408     case TARGET_NR_getcpu:
11409         {
11410             unsigned cpuid, node;
11411             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11412                                        arg2 ? &node : NULL,
11413                                        NULL));
11414             if (is_error(ret)) {
11415                 return ret;
11416             }
11417             if (arg1 && put_user_u32(cpuid, arg1)) {
11418                 return -TARGET_EFAULT;
11419             }
11420             if (arg2 && put_user_u32(node, arg2)) {
11421                 return -TARGET_EFAULT;
11422             }
11423         }
11424         return ret;
11425     case TARGET_NR_sched_setparam:
11426         {
11427             struct target_sched_param *target_schp;
11428             struct sched_param schp;
11429 
11430             if (arg2 == 0) {
11431                 return -TARGET_EINVAL;
11432             }
11433             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11434                 return -TARGET_EFAULT;
11435             }
11436             schp.sched_priority = tswap32(target_schp->sched_priority);
11437             unlock_user_struct(target_schp, arg2, 0);
11438             return get_errno(sys_sched_setparam(arg1, &schp));
11439         }
11440     case TARGET_NR_sched_getparam:
11441         {
11442             struct target_sched_param *target_schp;
11443             struct sched_param schp;
11444 
11445             if (arg2 == 0) {
11446                 return -TARGET_EINVAL;
11447             }
11448             ret = get_errno(sys_sched_getparam(arg1, &schp));
11449             if (!is_error(ret)) {
11450                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11451                     return -TARGET_EFAULT;
11452                 }
11453                 target_schp->sched_priority = tswap32(schp.sched_priority);
11454                 unlock_user_struct(target_schp, arg2, 1);
11455             }
11456         }
11457         return ret;
11458     case TARGET_NR_sched_setscheduler:
11459         {
11460             struct target_sched_param *target_schp;
11461             struct sched_param schp;
11462             if (arg3 == 0) {
11463                 return -TARGET_EINVAL;
11464             }
11465             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11466                 return -TARGET_EFAULT;
11467             }
11468             schp.sched_priority = tswap32(target_schp->sched_priority);
11469             unlock_user_struct(target_schp, arg3, 0);
11470             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11471         }
11472     case TARGET_NR_sched_getscheduler:
11473         return get_errno(sys_sched_getscheduler(arg1));
11474     case TARGET_NR_sched_getattr:
11475         {
11476             struct target_sched_attr *target_scha;
11477             struct sched_attr scha;
11478             if (arg2 == 0) {
11479                 return -TARGET_EINVAL;
11480             }
11481             if (arg3 > sizeof(scha)) {
11482                 arg3 = sizeof(scha);
11483             }
11484             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11485             if (!is_error(ret)) {
11486                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11487                 if (!target_scha) {
11488                     return -TARGET_EFAULT;
11489                 }
11490                 target_scha->size = tswap32(scha.size);
11491                 target_scha->sched_policy = tswap32(scha.sched_policy);
11492                 target_scha->sched_flags = tswap64(scha.sched_flags);
11493                 target_scha->sched_nice = tswap32(scha.sched_nice);
11494                 target_scha->sched_priority = tswap32(scha.sched_priority);
11495                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11496                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11497                 target_scha->sched_period = tswap64(scha.sched_period);
11498                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11499                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11500                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11501                 }
11502                 unlock_user(target_scha, arg2, arg3);
11503             }
11504             return ret;
11505         }
11506     case TARGET_NR_sched_setattr:
11507         {
11508             struct target_sched_attr *target_scha;
11509             struct sched_attr scha;
11510             uint32_t size;
11511             int zeroed;
11512             if (arg2 == 0) {
11513                 return -TARGET_EINVAL;
11514             }
11515             if (get_user_u32(size, arg2)) {
11516                 return -TARGET_EFAULT;
11517             }
11518             if (!size) {
11519                 size = offsetof(struct target_sched_attr, sched_util_min);
11520             }
11521             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11522                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11523                     return -TARGET_EFAULT;
11524                 }
11525                 return -TARGET_E2BIG;
11526             }
11527 
11528             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11529             if (zeroed < 0) {
11530                 return zeroed;
11531             } else if (zeroed == 0) {
11532                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11533                     return -TARGET_EFAULT;
11534                 }
11535                 return -TARGET_E2BIG;
11536             }
11537             if (size > sizeof(struct target_sched_attr)) {
11538                 size = sizeof(struct target_sched_attr);
11539             }
11540 
11541             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11542             if (!target_scha) {
11543                 return -TARGET_EFAULT;
11544             }
11545             scha.size = size;
11546             scha.sched_policy = tswap32(target_scha->sched_policy);
11547             scha.sched_flags = tswap64(target_scha->sched_flags);
11548             scha.sched_nice = tswap32(target_scha->sched_nice);
11549             scha.sched_priority = tswap32(target_scha->sched_priority);
11550             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11551             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11552             scha.sched_period = tswap64(target_scha->sched_period);
11553             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11554                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11555                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11556             }
11557             unlock_user(target_scha, arg2, 0);
11558             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11559         }
11560     case TARGET_NR_sched_yield:
11561         return get_errno(sched_yield());
11562     case TARGET_NR_sched_get_priority_max:
11563         return get_errno(sched_get_priority_max(arg1));
11564     case TARGET_NR_sched_get_priority_min:
11565         return get_errno(sched_get_priority_min(arg1));
11566 #ifdef TARGET_NR_sched_rr_get_interval
11567     case TARGET_NR_sched_rr_get_interval:
11568         {
11569             struct timespec ts;
11570             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11571             if (!is_error(ret)) {
11572                 ret = host_to_target_timespec(arg2, &ts);
11573             }
11574         }
11575         return ret;
11576 #endif
11577 #ifdef TARGET_NR_sched_rr_get_interval_time64
11578     case TARGET_NR_sched_rr_get_interval_time64:
11579         {
11580             struct timespec ts;
11581             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11582             if (!is_error(ret)) {
11583                 ret = host_to_target_timespec64(arg2, &ts);
11584             }
11585         }
11586         return ret;
11587 #endif
11588 #if defined(TARGET_NR_nanosleep)
11589     case TARGET_NR_nanosleep:
11590         {
11591             struct timespec req, rem;
11592             target_to_host_timespec(&req, arg1);
11593             ret = get_errno(safe_nanosleep(&req, &rem));
11594             if (is_error(ret) && arg2) {
11595                 host_to_target_timespec(arg2, &rem);
11596             }
11597         }
11598         return ret;
11599 #endif
11600     case TARGET_NR_prctl:
11601         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11602         break;
11603 #ifdef TARGET_NR_arch_prctl
11604     case TARGET_NR_arch_prctl:
11605         return do_arch_prctl(cpu_env, arg1, arg2);
11606 #endif
11607 #ifdef TARGET_NR_pread64
11608     case TARGET_NR_pread64:
11609         if (regpairs_aligned(cpu_env, num)) {
11610             arg4 = arg5;
11611             arg5 = arg6;
11612         }
11613         if (arg2 == 0 && arg3 == 0) {
11614             /* Special-case NULL buffer and zero length, which should succeed */
11615             p = 0;
11616         } else {
11617             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11618             if (!p) {
11619                 return -TARGET_EFAULT;
11620             }
11621         }
11622         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11623         unlock_user(p, arg2, ret);
11624         return ret;
11625     case TARGET_NR_pwrite64:
11626         if (regpairs_aligned(cpu_env, num)) {
11627             arg4 = arg5;
11628             arg5 = arg6;
11629         }
11630         if (arg2 == 0 && arg3 == 0) {
11631             /* Special-case NULL buffer and zero length, which should succeed */
11632             p = 0;
11633         } else {
11634             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11635             if (!p) {
11636                 return -TARGET_EFAULT;
11637             }
11638         }
11639         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11640         unlock_user(p, arg2, 0);
11641         return ret;
11642 #endif
11643     case TARGET_NR_getcwd:
11644         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11645             return -TARGET_EFAULT;
11646         ret = get_errno(sys_getcwd1(p, arg2));
11647         unlock_user(p, arg1, ret);
11648         return ret;
11649     case TARGET_NR_capget:
11650     case TARGET_NR_capset:
11651     {
11652         struct target_user_cap_header *target_header;
11653         struct target_user_cap_data *target_data = NULL;
11654         struct __user_cap_header_struct header;
11655         struct __user_cap_data_struct data[2];
11656         struct __user_cap_data_struct *dataptr = NULL;
11657         int i, target_datalen;
11658         int data_items = 1;
11659 
11660         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11661             return -TARGET_EFAULT;
11662         }
11663         header.version = tswap32(target_header->version);
11664         header.pid = tswap32(target_header->pid);
11665 
11666         if (header.version != _LINUX_CAPABILITY_VERSION) {
11667             /* Version 2 and up takes pointer to two user_data structs */
11668             data_items = 2;
11669         }
11670 
11671         target_datalen = sizeof(*target_data) * data_items;
11672 
11673         if (arg2) {
11674             if (num == TARGET_NR_capget) {
11675                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11676             } else {
11677                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11678             }
11679             if (!target_data) {
11680                 unlock_user_struct(target_header, arg1, 0);
11681                 return -TARGET_EFAULT;
11682             }
11683 
11684             if (num == TARGET_NR_capset) {
11685                 for (i = 0; i < data_items; i++) {
11686                     data[i].effective = tswap32(target_data[i].effective);
11687                     data[i].permitted = tswap32(target_data[i].permitted);
11688                     data[i].inheritable = tswap32(target_data[i].inheritable);
11689                 }
11690             }
11691 
11692             dataptr = data;
11693         }
11694 
11695         if (num == TARGET_NR_capget) {
11696             ret = get_errno(capget(&header, dataptr));
11697         } else {
11698             ret = get_errno(capset(&header, dataptr));
11699         }
11700 
11701         /* The kernel always updates version for both capget and capset */
11702         target_header->version = tswap32(header.version);
11703         unlock_user_struct(target_header, arg1, 1);
11704 
11705         if (arg2) {
11706             if (num == TARGET_NR_capget) {
11707                 for (i = 0; i < data_items; i++) {
11708                     target_data[i].effective = tswap32(data[i].effective);
11709                     target_data[i].permitted = tswap32(data[i].permitted);
11710                     target_data[i].inheritable = tswap32(data[i].inheritable);
11711                 }
11712                 unlock_user(target_data, arg2, target_datalen);
11713             } else {
11714                 unlock_user(target_data, arg2, 0);
11715             }
11716         }
11717         return ret;
11718     }
11719     case TARGET_NR_sigaltstack:
11720         return do_sigaltstack(arg1, arg2, cpu_env);
11721 
11722 #ifdef CONFIG_SENDFILE
11723 #ifdef TARGET_NR_sendfile
11724     case TARGET_NR_sendfile:
11725     {
11726         off_t *offp = NULL;
11727         off_t off;
11728         if (arg3) {
11729             ret = get_user_sal(off, arg3);
11730             if (is_error(ret)) {
11731                 return ret;
11732             }
11733             offp = &off;
11734         }
11735         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11736         if (!is_error(ret) && arg3) {
11737             abi_long ret2 = put_user_sal(off, arg3);
11738             if (is_error(ret2)) {
11739                 ret = ret2;
11740             }
11741         }
11742         return ret;
11743     }
11744 #endif
11745 #ifdef TARGET_NR_sendfile64
11746     case TARGET_NR_sendfile64:
11747     {
11748         off_t *offp = NULL;
11749         off_t off;
11750         if (arg3) {
11751             ret = get_user_s64(off, arg3);
11752             if (is_error(ret)) {
11753                 return ret;
11754             }
11755             offp = &off;
11756         }
11757         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11758         if (!is_error(ret) && arg3) {
11759             abi_long ret2 = put_user_s64(off, arg3);
11760             if (is_error(ret2)) {
11761                 ret = ret2;
11762             }
11763         }
11764         return ret;
11765     }
11766 #endif
11767 #endif
11768 #ifdef TARGET_NR_vfork
11769     case TARGET_NR_vfork:
11770         return get_errno(do_fork(cpu_env,
11771                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11772                          0, 0, 0, 0));
11773 #endif
11774 #ifdef TARGET_NR_ugetrlimit
11775     case TARGET_NR_ugetrlimit:
11776     {
11777 	struct rlimit rlim;
11778 	int resource = target_to_host_resource(arg1);
11779 	ret = get_errno(getrlimit(resource, &rlim));
11780 	if (!is_error(ret)) {
11781 	    struct target_rlimit *target_rlim;
11782             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11783                 return -TARGET_EFAULT;
11784 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11785 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11786             unlock_user_struct(target_rlim, arg2, 1);
11787 	}
11788         return ret;
11789     }
11790 #endif
11791 #ifdef TARGET_NR_truncate64
11792     case TARGET_NR_truncate64:
11793         if (!(p = lock_user_string(arg1)))
11794             return -TARGET_EFAULT;
11795 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11796         unlock_user(p, arg1, 0);
11797         return ret;
11798 #endif
11799 #ifdef TARGET_NR_ftruncate64
11800     case TARGET_NR_ftruncate64:
11801         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11802 #endif
11803 #ifdef TARGET_NR_stat64
11804     case TARGET_NR_stat64:
11805         if (!(p = lock_user_string(arg1))) {
11806             return -TARGET_EFAULT;
11807         }
11808         ret = get_errno(stat(path(p), &st));
11809         unlock_user(p, arg1, 0);
11810         if (!is_error(ret))
11811             ret = host_to_target_stat64(cpu_env, arg2, &st);
11812         return ret;
11813 #endif
11814 #ifdef TARGET_NR_lstat64
11815     case TARGET_NR_lstat64:
11816         if (!(p = lock_user_string(arg1))) {
11817             return -TARGET_EFAULT;
11818         }
11819         ret = get_errno(lstat(path(p), &st));
11820         unlock_user(p, arg1, 0);
11821         if (!is_error(ret))
11822             ret = host_to_target_stat64(cpu_env, arg2, &st);
11823         return ret;
11824 #endif
11825 #ifdef TARGET_NR_fstat64
11826     case TARGET_NR_fstat64:
11827         ret = get_errno(fstat(arg1, &st));
11828         if (!is_error(ret))
11829             ret = host_to_target_stat64(cpu_env, arg2, &st);
11830         return ret;
11831 #endif
11832 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11833 #ifdef TARGET_NR_fstatat64
11834     case TARGET_NR_fstatat64:
11835 #endif
11836 #ifdef TARGET_NR_newfstatat
11837     case TARGET_NR_newfstatat:
11838 #endif
11839         if (!(p = lock_user_string(arg2))) {
11840             return -TARGET_EFAULT;
11841         }
11842         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11843         unlock_user(p, arg2, 0);
11844         if (!is_error(ret))
11845             ret = host_to_target_stat64(cpu_env, arg3, &st);
11846         return ret;
11847 #endif
11848 #if defined(TARGET_NR_statx)
11849     case TARGET_NR_statx:
11850         {
11851             struct target_statx *target_stx;
11852             int dirfd = arg1;
11853             int flags = arg3;
11854 
11855             p = lock_user_string(arg2);
11856             if (p == NULL) {
11857                 return -TARGET_EFAULT;
11858             }
11859 #if defined(__NR_statx)
11860             {
11861                 /*
11862                  * It is assumed that struct statx is architecture independent.
11863                  */
11864                 struct target_statx host_stx;
11865                 int mask = arg4;
11866 
11867                 ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
11868                 if (!is_error(ret)) {
11869                     if (host_to_target_statx(&host_stx, arg5) != 0) {
11870                         unlock_user(p, arg2, 0);
11871                         return -TARGET_EFAULT;
11872                     }
11873                 }
11874 
11875                 if (ret != -TARGET_ENOSYS) {
11876                     unlock_user(p, arg2, 0);
11877                     return ret;
11878                 }
11879             }
11880 #endif
11881             ret = get_errno(fstatat(dirfd, path(p), &st, flags));
11882             unlock_user(p, arg2, 0);
11883 
11884             if (!is_error(ret)) {
11885                 if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
11886                     return -TARGET_EFAULT;
11887                 }
11888                 memset(target_stx, 0, sizeof(*target_stx));
11889                 __put_user(major(st.st_dev), &target_stx->stx_dev_major);
11890                 __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
11891                 __put_user(st.st_ino, &target_stx->stx_ino);
11892                 __put_user(st.st_mode, &target_stx->stx_mode);
11893                 __put_user(st.st_uid, &target_stx->stx_uid);
11894                 __put_user(st.st_gid, &target_stx->stx_gid);
11895                 __put_user(st.st_nlink, &target_stx->stx_nlink);
11896                 __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
11897                 __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
11898                 __put_user(st.st_size, &target_stx->stx_size);
11899                 __put_user(st.st_blksize, &target_stx->stx_blksize);
11900                 __put_user(st.st_blocks, &target_stx->stx_blocks);
11901                 __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
11902                 __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
11903                 __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
11904                 unlock_user_struct(target_stx, arg5, 1);
11905             }
11906         }
11907         return ret;
11908 #endif
11909 #ifdef TARGET_NR_lchown
11910     case TARGET_NR_lchown:
11911         if (!(p = lock_user_string(arg1)))
11912             return -TARGET_EFAULT;
11913         ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
11914         unlock_user(p, arg1, 0);
11915         return ret;
11916 #endif
11917 #ifdef TARGET_NR_getuid
11918     case TARGET_NR_getuid:
11919         return get_errno(high2lowuid(getuid()));
11920 #endif
11921 #ifdef TARGET_NR_getgid
11922     case TARGET_NR_getgid:
11923         return get_errno(high2lowgid(getgid()));
11924 #endif
11925 #ifdef TARGET_NR_geteuid
11926     case TARGET_NR_geteuid:
11927         return get_errno(high2lowuid(geteuid()));
11928 #endif
11929 #ifdef TARGET_NR_getegid
11930     case TARGET_NR_getegid:
11931         return get_errno(high2lowgid(getegid()));
11932 #endif
11933     case TARGET_NR_setreuid:
11934         return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
11935     case TARGET_NR_setregid:
11936         return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
11937     case TARGET_NR_getgroups:
11938         { /* the same code as for TARGET_NR_getgroups32 */
11939             int gidsetsize = arg1;
11940             target_id *target_grouplist;
11941             g_autofree gid_t *grouplist = NULL;
11942             int i;
11943 
11944             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11945                 return -TARGET_EINVAL;
11946             }
11947             if (gidsetsize > 0) {
11948                 grouplist = g_try_new(gid_t, gidsetsize);
11949                 if (!grouplist) {
11950                     return -TARGET_ENOMEM;
11951                 }
11952             }
11953             ret = get_errno(getgroups(gidsetsize, grouplist));
11954             if (!is_error(ret) && gidsetsize > 0) {
11955                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
11956                                              gidsetsize * sizeof(target_id), 0);
11957                 if (!target_grouplist) {
11958                     return -TARGET_EFAULT;
11959                 }
11960                 for (i = 0; i < ret; i++) {
11961                     target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
11962                 }
11963                 unlock_user(target_grouplist, arg2,
11964                             gidsetsize * sizeof(target_id));
11965             }
11966             return ret;
11967         }
11968     case TARGET_NR_setgroups:
11969         { /* the same code as for TARGET_NR_setgroups32 */
11970             int gidsetsize = arg1;
11971             target_id *target_grouplist;
11972             g_autofree gid_t *grouplist = NULL;
11973             int i;
11974 
11975             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
11976                 return -TARGET_EINVAL;
11977             }
11978             if (gidsetsize > 0) {
11979                 grouplist = g_try_new(gid_t, gidsetsize);
11980                 if (!grouplist) {
11981                     return -TARGET_ENOMEM;
11982                 }
11983                 target_grouplist = lock_user(VERIFY_READ, arg2,
11984                                              gidsetsize * sizeof(target_id), 1);
11985                 if (!target_grouplist) {
11986                     return -TARGET_EFAULT;
11987                 }
11988                 for (i = 0; i < gidsetsize; i++) {
11989                     grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
11990                 }
11991                 unlock_user(target_grouplist, arg2,
11992                             gidsetsize * sizeof(target_id));
11993             }
11994             return get_errno(sys_setgroups(gidsetsize, grouplist));
11995         }
11996     case TARGET_NR_fchown:
11997         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
11998 #if defined(TARGET_NR_fchownat)
11999     case TARGET_NR_fchownat:
12000         if (!(p = lock_user_string(arg2)))
12001             return -TARGET_EFAULT;
12002         ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
12003                                  low2highgid(arg4), arg5));
12004         unlock_user(p, arg2, 0);
12005         return ret;
12006 #endif
12007 #ifdef TARGET_NR_setresuid
12008     case TARGET_NR_setresuid:
12009         return get_errno(sys_setresuid(low2highuid(arg1),
12010                                        low2highuid(arg2),
12011                                        low2highuid(arg3)));
12012 #endif
12013 #ifdef TARGET_NR_getresuid
12014     case TARGET_NR_getresuid:
12015         {
12016             uid_t ruid, euid, suid;
12017             ret = get_errno(getresuid(&ruid, &euid, &suid));
12018             if (!is_error(ret)) {
12019                 if (put_user_id(high2lowuid(ruid), arg1)
12020                     || put_user_id(high2lowuid(euid), arg2)
12021                     || put_user_id(high2lowuid(suid), arg3))
12022                     return -TARGET_EFAULT;
12023             }
12024         }
12025         return ret;
12026 #endif
12027 #ifdef TARGET_NR_getresgid
12028     case TARGET_NR_setresgid:
12029         return get_errno(sys_setresgid(low2highgid(arg1),
12030                                        low2highgid(arg2),
12031                                        low2highgid(arg3)));
12032 #endif
12033 #ifdef TARGET_NR_getresgid
12034     case TARGET_NR_getresgid:
12035         {
12036             gid_t rgid, egid, sgid;
12037             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12038             if (!is_error(ret)) {
12039                 if (put_user_id(high2lowgid(rgid), arg1)
12040                     || put_user_id(high2lowgid(egid), arg2)
12041                     || put_user_id(high2lowgid(sgid), arg3))
12042                     return -TARGET_EFAULT;
12043             }
12044         }
12045         return ret;
12046 #endif
12047 #ifdef TARGET_NR_chown
12048     case TARGET_NR_chown:
12049         if (!(p = lock_user_string(arg1)))
12050             return -TARGET_EFAULT;
12051         ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
12052         unlock_user(p, arg1, 0);
12053         return ret;
12054 #endif
12055     case TARGET_NR_setuid:
12056         return get_errno(sys_setuid(low2highuid(arg1)));
12057     case TARGET_NR_setgid:
12058         return get_errno(sys_setgid(low2highgid(arg1)));
12059     case TARGET_NR_setfsuid:
12060         return get_errno(setfsuid(arg1));
12061     case TARGET_NR_setfsgid:
12062         return get_errno(setfsgid(arg1));
12063 
12064 #ifdef TARGET_NR_lchown32
12065     case TARGET_NR_lchown32:
12066         if (!(p = lock_user_string(arg1)))
12067             return -TARGET_EFAULT;
12068         ret = get_errno(lchown(p, arg2, arg3));
12069         unlock_user(p, arg1, 0);
12070         return ret;
12071 #endif
12072 #ifdef TARGET_NR_getuid32
12073     case TARGET_NR_getuid32:
12074         return get_errno(getuid());
12075 #endif
12076 
12077 #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
12078    /* Alpha specific */
12079     case TARGET_NR_getxuid:
12080          {
12081             uid_t euid;
12082             euid=geteuid();
12083             cpu_env->ir[IR_A4]=euid;
12084          }
12085         return get_errno(getuid());
12086 #endif
12087 #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
12088    /* Alpha specific */
12089     case TARGET_NR_getxgid:
12090          {
12091             uid_t egid;
12092             egid=getegid();
12093             cpu_env->ir[IR_A4]=egid;
12094          }
12095         return get_errno(getgid());
12096 #endif
12097 #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
12098     /* Alpha specific */
12099     case TARGET_NR_osf_getsysinfo:
12100         ret = -TARGET_EOPNOTSUPP;
12101         switch (arg1) {
12102           case TARGET_GSI_IEEE_FP_CONTROL:
12103             {
12104                 uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
12105                 uint64_t swcr = cpu_env->swcr;
12106 
12107                 swcr &= ~SWCR_STATUS_MASK;
12108                 swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;
12109 
12110                 if (put_user_u64 (swcr, arg2))
12111                         return -TARGET_EFAULT;
12112                 ret = 0;
12113             }
12114             break;
12115 
12116           /* case GSI_IEEE_STATE_AT_SIGNAL:
12117              -- Not implemented in linux kernel.
12118              case GSI_UACPROC:
12119              -- Retrieves current unaligned access state; not much used.
12120              case GSI_PROC_TYPE:
12121              -- Retrieves implver information; surely not used.
12122              case GSI_GET_HWRPB:
12123              -- Grabs a copy of the HWRPB; surely not used.
12124           */
12125         }
12126         return ret;
12127 #endif
12128 #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
12129     /* Alpha specific */
12130     case TARGET_NR_osf_setsysinfo:
12131         ret = -TARGET_EOPNOTSUPP;
12132         switch (arg1) {
12133           case TARGET_SSI_IEEE_FP_CONTROL:
12134             {
12135                 uint64_t swcr, fpcr;
12136 
12137                 if (get_user_u64 (swcr, arg2)) {
12138                     return -TARGET_EFAULT;
12139                 }
12140 
12141                 /*
12142                  * The kernel calls swcr_update_status to update the
12143                  * status bits from the fpcr at every point that it
12144                  * could be queried.  Therefore, we store the status
12145                  * bits only in FPCR.
12146                  */
12147                 cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);
12148 
12149                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12150                 fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
12151                 fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
12152                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12153                 ret = 0;
12154             }
12155             break;
12156 
12157           case TARGET_SSI_IEEE_RAISE_EXCEPTION:
12158             {
12159                 uint64_t exc, fpcr, fex;
12160 
12161                 if (get_user_u64(exc, arg2)) {
12162                     return -TARGET_EFAULT;
12163                 }
12164                 exc &= SWCR_STATUS_MASK;
12165                 fpcr = cpu_alpha_load_fpcr(cpu_env);
12166 
12167                 /* Old exceptions are not signaled.  */
12168                 fex = alpha_ieee_fpcr_to_swcr(fpcr);
12169                 fex = exc & ~fex;
12170                 fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
12171                 fex &= (cpu_env)->swcr;
12172 
12173                 /* Update the hardware fpcr.  */
12174                 fpcr |= alpha_ieee_swcr_to_fpcr(exc);
12175                 cpu_alpha_store_fpcr(cpu_env, fpcr);
12176 
12177                 if (fex) {
12178                     int si_code = TARGET_FPE_FLTUNK;
12179                     target_siginfo_t info;
12180 
12181                     if (fex & SWCR_TRAP_ENABLE_DNO) {
12182                         si_code = TARGET_FPE_FLTUND;
12183                     }
12184                     if (fex & SWCR_TRAP_ENABLE_INE) {
12185                         si_code = TARGET_FPE_FLTRES;
12186                     }
12187                     if (fex & SWCR_TRAP_ENABLE_UNF) {
12188                         si_code = TARGET_FPE_FLTUND;
12189                     }
12190                     if (fex & SWCR_TRAP_ENABLE_OVF) {
12191                         si_code = TARGET_FPE_FLTOVF;
12192                     }
12193                     if (fex & SWCR_TRAP_ENABLE_DZE) {
12194                         si_code = TARGET_FPE_FLTDIV;
12195                     }
12196                     if (fex & SWCR_TRAP_ENABLE_INV) {
12197                         si_code = TARGET_FPE_FLTINV;
12198                     }
12199 
12200                     info.si_signo = SIGFPE;
12201                     info.si_errno = 0;
12202                     info.si_code = si_code;
12203                     info._sifields._sigfault._addr = (cpu_env)->pc;
12204                     queue_signal(cpu_env, info.si_signo,
12205                                  QEMU_SI_FAULT, &info);
12206                 }
12207                 ret = 0;
12208             }
12209             break;
12210 
12211           /* case SSI_NVPAIRS:
12212              -- Used with SSIN_UACPROC to enable unaligned accesses.
12213              case SSI_IEEE_STATE_AT_SIGNAL:
12214              case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
12215              -- Not implemented in linux kernel
12216           */
12217         }
12218         return ret;
12219 #endif
12220 #ifdef TARGET_NR_osf_sigprocmask
12221     /* Alpha specific.  */
12222     case TARGET_NR_osf_sigprocmask:
12223         {
12224             abi_ulong mask;
12225             int how;
12226             sigset_t set, oldset;
12227 
12228             switch(arg1) {
12229             case TARGET_SIG_BLOCK:
12230                 how = SIG_BLOCK;
12231                 break;
12232             case TARGET_SIG_UNBLOCK:
12233                 how = SIG_UNBLOCK;
12234                 break;
12235             case TARGET_SIG_SETMASK:
12236                 how = SIG_SETMASK;
12237                 break;
12238             default:
12239                 return -TARGET_EINVAL;
12240             }
12241             mask = arg2;
12242             target_to_host_old_sigset(&set, &mask);
12243             ret = do_sigprocmask(how, &set, &oldset);
12244             if (!ret) {
12245                 host_to_target_old_sigset(&mask, &oldset);
12246                 ret = mask;
12247             }
12248         }
12249         return ret;
12250 #endif
12251 
12252 #ifdef TARGET_NR_getgid32
12253     case TARGET_NR_getgid32:
12254         return get_errno(getgid());
12255 #endif
12256 #ifdef TARGET_NR_geteuid32
12257     case TARGET_NR_geteuid32:
12258         return get_errno(geteuid());
12259 #endif
12260 #ifdef TARGET_NR_getegid32
12261     case TARGET_NR_getegid32:
12262         return get_errno(getegid());
12263 #endif
12264 #ifdef TARGET_NR_setreuid32
12265     case TARGET_NR_setreuid32:
12266         return get_errno(setreuid(arg1, arg2));
12267 #endif
12268 #ifdef TARGET_NR_setregid32
12269     case TARGET_NR_setregid32:
12270         return get_errno(setregid(arg1, arg2));
12271 #endif
12272 #ifdef TARGET_NR_getgroups32
12273     case TARGET_NR_getgroups32:
12274         { /* the same code as for TARGET_NR_getgroups */
12275             int gidsetsize = arg1;
12276             uint32_t *target_grouplist;
12277             g_autofree gid_t *grouplist = NULL;
12278             int i;
12279 
12280             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12281                 return -TARGET_EINVAL;
12282             }
12283             if (gidsetsize > 0) {
12284                 grouplist = g_try_new(gid_t, gidsetsize);
12285                 if (!grouplist) {
12286                     return -TARGET_ENOMEM;
12287                 }
12288             }
12289             ret = get_errno(getgroups(gidsetsize, grouplist));
12290             if (!is_error(ret) && gidsetsize > 0) {
12291                 target_grouplist = lock_user(VERIFY_WRITE, arg2,
12292                                              gidsetsize * 4, 0);
12293                 if (!target_grouplist) {
12294                     return -TARGET_EFAULT;
12295                 }
12296                 for (i = 0; i < ret; i++) {
12297                     target_grouplist[i] = tswap32(grouplist[i]);
12298                 }
12299                 unlock_user(target_grouplist, arg2, gidsetsize * 4);
12300             }
12301             return ret;
12302         }
12303 #endif
12304 #ifdef TARGET_NR_setgroups32
12305     case TARGET_NR_setgroups32:
12306         { /* the same code as for TARGET_NR_setgroups */
12307             int gidsetsize = arg1;
12308             uint32_t *target_grouplist;
12309             g_autofree gid_t *grouplist = NULL;
12310             int i;
12311 
12312             if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
12313                 return -TARGET_EINVAL;
12314             }
12315             if (gidsetsize > 0) {
12316                 grouplist = g_try_new(gid_t, gidsetsize);
12317                 if (!grouplist) {
12318                     return -TARGET_ENOMEM;
12319                 }
12320                 target_grouplist = lock_user(VERIFY_READ, arg2,
12321                                              gidsetsize * 4, 1);
12322                 if (!target_grouplist) {
12323                     return -TARGET_EFAULT;
12324                 }
12325                 for (i = 0; i < gidsetsize; i++) {
12326                     grouplist[i] = tswap32(target_grouplist[i]);
12327                 }
12328                 unlock_user(target_grouplist, arg2, 0);
12329             }
12330             return get_errno(sys_setgroups(gidsetsize, grouplist));
12331         }
12332 #endif
12333 #ifdef TARGET_NR_fchown32
12334     case TARGET_NR_fchown32:
12335         return get_errno(fchown(arg1, arg2, arg3));
12336 #endif
12337 #ifdef TARGET_NR_setresuid32
12338     case TARGET_NR_setresuid32:
12339         return get_errno(sys_setresuid(arg1, arg2, arg3));
12340 #endif
12341 #ifdef TARGET_NR_getresuid32
12342     case TARGET_NR_getresuid32:
12343         {
12344             uid_t ruid, euid, suid;
12345             ret = get_errno(getresuid(&ruid, &euid, &suid));
12346             if (!is_error(ret)) {
12347                 if (put_user_u32(ruid, arg1)
12348                     || put_user_u32(euid, arg2)
12349                     || put_user_u32(suid, arg3))
12350                     return -TARGET_EFAULT;
12351             }
12352         }
12353         return ret;
12354 #endif
12355 #ifdef TARGET_NR_setresgid32
12356     case TARGET_NR_setresgid32:
12357         return get_errno(sys_setresgid(arg1, arg2, arg3));
12358 #endif
12359 #ifdef TARGET_NR_getresgid32
12360     case TARGET_NR_getresgid32:
12361         {
12362             gid_t rgid, egid, sgid;
12363             ret = get_errno(getresgid(&rgid, &egid, &sgid));
12364             if (!is_error(ret)) {
12365                 if (put_user_u32(rgid, arg1)
12366                     || put_user_u32(egid, arg2)
12367                     || put_user_u32(sgid, arg3))
12368                     return -TARGET_EFAULT;
12369             }
12370         }
12371         return ret;
12372 #endif
12373 #ifdef TARGET_NR_chown32
12374     case TARGET_NR_chown32:
12375         if (!(p = lock_user_string(arg1)))
12376             return -TARGET_EFAULT;
12377         ret = get_errno(chown(p, arg2, arg3));
12378         unlock_user(p, arg1, 0);
12379         return ret;
12380 #endif
12381 #ifdef TARGET_NR_setuid32
12382     case TARGET_NR_setuid32:
12383         return get_errno(sys_setuid(arg1));
12384 #endif
12385 #ifdef TARGET_NR_setgid32
12386     case TARGET_NR_setgid32:
12387         return get_errno(sys_setgid(arg1));
12388 #endif
12389 #ifdef TARGET_NR_setfsuid32
12390     case TARGET_NR_setfsuid32:
12391         return get_errno(setfsuid(arg1));
12392 #endif
12393 #ifdef TARGET_NR_setfsgid32
12394     case TARGET_NR_setfsgid32:
12395         return get_errno(setfsgid(arg1));
12396 #endif
12397 #ifdef TARGET_NR_mincore
12398     case TARGET_NR_mincore:
12399         {
12400             void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
12401             if (!a) {
12402                 return -TARGET_ENOMEM;
12403             }
12404             p = lock_user_string(arg3);
12405             if (!p) {
12406                 ret = -TARGET_EFAULT;
12407             } else {
12408                 ret = get_errno(mincore(a, arg2, p));
12409                 unlock_user(p, arg3, ret);
12410             }
12411             unlock_user(a, arg1, 0);
12412         }
12413         return ret;
12414 #endif
12415 #ifdef TARGET_NR_arm_fadvise64_64
12416     case TARGET_NR_arm_fadvise64_64:
12417         /* arm_fadvise64_64 looks like fadvise64_64 but
12418          * with different argument order: fd, advice, offset, len
12419          * rather than the usual fd, offset, len, advice.
12420          * Note that offset and len are both 64-bit so appear as
12421          * pairs of 32-bit registers.
12422          */
12423         ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
12424                             target_offset64(arg5, arg6), arg2);
12425         return -host_to_target_errno(ret);
12426 #endif
12427 
12428 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12429 
12430 #ifdef TARGET_NR_fadvise64_64
12431     case TARGET_NR_fadvise64_64:
12432 #if defined(TARGET_PPC) || defined(TARGET_XTENSA)
12433         /* 6 args: fd, advice, offset (high, low), len (high, low) */
12434         ret = arg2;
12435         arg2 = arg3;
12436         arg3 = arg4;
12437         arg4 = arg5;
12438         arg5 = arg6;
12439         arg6 = ret;
12440 #else
12441         /* 6 args: fd, offset (high, low), len (high, low), advice */
12442         if (regpairs_aligned(cpu_env, num)) {
12443             /* offset is in (3,4), len in (5,6) and advice in 7 */
12444             arg2 = arg3;
12445             arg3 = arg4;
12446             arg4 = arg5;
12447             arg5 = arg6;
12448             arg6 = arg7;
12449         }
12450 #endif
12451         ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
12452                             target_offset64(arg4, arg5), arg6);
12453         return -host_to_target_errno(ret);
12454 #endif
12455 
12456 #ifdef TARGET_NR_fadvise64
12457     case TARGET_NR_fadvise64:
12458         /* 5 args: fd, offset (high, low), len, advice */
12459         if (regpairs_aligned(cpu_env, num)) {
12460             /* offset is in (3,4), len in 5 and advice in 6 */
12461             arg2 = arg3;
12462             arg3 = arg4;
12463             arg4 = arg5;
12464             arg5 = arg6;
12465         }
12466         ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
12467         return -host_to_target_errno(ret);
12468 #endif
12469 
12470 #else /* not a 32-bit ABI */
12471 #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
12472 #ifdef TARGET_NR_fadvise64_64
12473     case TARGET_NR_fadvise64_64:
12474 #endif
12475 #ifdef TARGET_NR_fadvise64
12476     case TARGET_NR_fadvise64:
12477 #endif
12478 #ifdef TARGET_S390X
12479         switch (arg4) {
12480         case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
12481         case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
12482         case 6: arg4 = POSIX_FADV_DONTNEED; break;
12483         case 7: arg4 = POSIX_FADV_NOREUSE; break;
12484         default: break;
12485         }
12486 #endif
12487         return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
12488 #endif
12489 #endif /* end of 64-bit ABI fadvise handling */
12490 
12491 #ifdef TARGET_NR_madvise
12492     case TARGET_NR_madvise:
12493         return target_madvise(arg1, arg2, arg3);
12494 #endif
12495 #ifdef TARGET_NR_fcntl64
12496     case TARGET_NR_fcntl64:
12497     {
12498         int cmd;
12499         struct flock fl;
12500         from_flock64_fn *copyfrom = copy_from_user_flock64;
12501         to_flock64_fn *copyto = copy_to_user_flock64;
12502 
12503 #ifdef TARGET_ARM
12504         if (!cpu_env->eabi) {
12505             copyfrom = copy_from_user_oabi_flock64;
12506             copyto = copy_to_user_oabi_flock64;
12507         }
12508 #endif
12509 
12510         cmd = target_to_host_fcntl_cmd(arg2);
12511         if (cmd == -TARGET_EINVAL) {
12512             return cmd;
12513         }
12514 
12515         switch(arg2) {
12516         case TARGET_F_GETLK64:
12517             ret = copyfrom(&fl, arg3);
12518             if (ret) {
12519                 break;
12520             }
12521             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12522             if (ret == 0) {
12523                 ret = copyto(arg3, &fl);
12524             }
12525 	    break;
12526 
12527         case TARGET_F_SETLK64:
12528         case TARGET_F_SETLKW64:
12529             ret = copyfrom(&fl, arg3);
12530             if (ret) {
12531                 break;
12532             }
12533             ret = get_errno(safe_fcntl(arg1, cmd, &fl));
12534 	    break;
12535         default:
12536             ret = do_fcntl(arg1, arg2, arg3);
12537             break;
12538         }
12539         return ret;
12540     }
12541 #endif
12542 #ifdef TARGET_NR_cacheflush
12543     case TARGET_NR_cacheflush:
12544         /* self-modifying code is handled automatically, so nothing needed */
12545         return 0;
12546 #endif
12547 #ifdef TARGET_NR_getpagesize
12548     case TARGET_NR_getpagesize:
12549         return TARGET_PAGE_SIZE;
12550 #endif
12551     case TARGET_NR_gettid:
12552         return get_errno(sys_gettid());
12553 #ifdef TARGET_NR_readahead
12554     case TARGET_NR_readahead:
12555 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
12556         if (regpairs_aligned(cpu_env, num)) {
12557             arg2 = arg3;
12558             arg3 = arg4;
12559             arg4 = arg5;
12560         }
12561         ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
12562 #else
12563         ret = get_errno(readahead(arg1, arg2, arg3));
12564 #endif
12565         return ret;
12566 #endif
12567 #ifdef CONFIG_ATTR
12568 #ifdef TARGET_NR_setxattr
12569     case TARGET_NR_listxattr:
12570     case TARGET_NR_llistxattr:
12571     {
12572         void *b = 0;
12573         if (arg2) {
12574             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12575             if (!b) {
12576                 return -TARGET_EFAULT;
12577             }
12578         }
12579         p = lock_user_string(arg1);
12580         if (p) {
12581             if (num == TARGET_NR_listxattr) {
12582                 ret = get_errno(listxattr(p, b, arg3));
12583             } else {
12584                 ret = get_errno(llistxattr(p, b, arg3));
12585             }
12586         } else {
12587             ret = -TARGET_EFAULT;
12588         }
12589         unlock_user(p, arg1, 0);
12590         unlock_user(b, arg2, arg3);
12591         return ret;
12592     }
12593     case TARGET_NR_flistxattr:
12594     {
12595         void *b = 0;
12596         if (arg2) {
12597             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12598             if (!b) {
12599                 return -TARGET_EFAULT;
12600             }
12601         }
12602         ret = get_errno(flistxattr(arg1, b, arg3));
12603         unlock_user(b, arg2, arg3);
12604         return ret;
12605     }
12606     case TARGET_NR_setxattr:
12607     case TARGET_NR_lsetxattr:
12608         {
12609             void *n, *v = 0;
12610             if (arg3) {
12611                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12612                 if (!v) {
12613                     return -TARGET_EFAULT;
12614                 }
12615             }
12616             p = lock_user_string(arg1);
12617             n = lock_user_string(arg2);
12618             if (p && n) {
12619                 if (num == TARGET_NR_setxattr) {
12620                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12621                 } else {
12622                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12623                 }
12624             } else {
12625                 ret = -TARGET_EFAULT;
12626             }
12627             unlock_user(p, arg1, 0);
12628             unlock_user(n, arg2, 0);
12629             unlock_user(v, arg3, 0);
12630         }
12631         return ret;
12632     case TARGET_NR_fsetxattr:
12633         {
12634             void *n, *v = 0;
12635             if (arg3) {
12636                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12637                 if (!v) {
12638                     return -TARGET_EFAULT;
12639                 }
12640             }
12641             n = lock_user_string(arg2);
12642             if (n) {
12643                 ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
12644             } else {
12645                 ret = -TARGET_EFAULT;
12646             }
12647             unlock_user(n, arg2, 0);
12648             unlock_user(v, arg3, 0);
12649         }
12650         return ret;
12651     case TARGET_NR_getxattr:
12652     case TARGET_NR_lgetxattr:
12653         {
12654             void *n, *v = 0;
12655             if (arg3) {
12656                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12657                 if (!v) {
12658                     return -TARGET_EFAULT;
12659                 }
12660             }
12661             p = lock_user_string(arg1);
12662             n = lock_user_string(arg2);
12663             if (p && n) {
12664                 if (num == TARGET_NR_getxattr) {
12665                     ret = get_errno(getxattr(p, n, v, arg4));
12666                 } else {
12667                     ret = get_errno(lgetxattr(p, n, v, arg4));
12668                 }
12669             } else {
12670                 ret = -TARGET_EFAULT;
12671             }
12672             unlock_user(p, arg1, 0);
12673             unlock_user(n, arg2, 0);
12674             unlock_user(v, arg3, arg4);
12675         }
12676         return ret;
12677     case TARGET_NR_fgetxattr:
12678         {
12679             void *n, *v = 0;
12680             if (arg3) {
12681                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12682                 if (!v) {
12683                     return -TARGET_EFAULT;
12684                 }
12685             }
12686             n = lock_user_string(arg2);
12687             if (n) {
12688                 ret = get_errno(fgetxattr(arg1, n, v, arg4));
12689             } else {
12690                 ret = -TARGET_EFAULT;
12691             }
12692             unlock_user(n, arg2, 0);
12693             unlock_user(v, arg3, arg4);
12694         }
12695         return ret;
12696     case TARGET_NR_removexattr:
12697     case TARGET_NR_lremovexattr:
12698         {
12699             void *n;
12700             p = lock_user_string(arg1);
12701             n = lock_user_string(arg2);
12702             if (p && n) {
12703                 if (num == TARGET_NR_removexattr) {
12704                     ret = get_errno(removexattr(p, n));
12705                 } else {
12706                     ret = get_errno(lremovexattr(p, n));
12707                 }
12708             } else {
12709                 ret = -TARGET_EFAULT;
12710             }
12711             unlock_user(p, arg1, 0);
12712             unlock_user(n, arg2, 0);
12713         }
12714         return ret;
12715     case TARGET_NR_fremovexattr:
12716         {
12717             void *n;
12718             n = lock_user_string(arg2);
12719             if (n) {
12720                 ret = get_errno(fremovexattr(arg1, n));
12721             } else {
12722                 ret = -TARGET_EFAULT;
12723             }
12724             unlock_user(n, arg2, 0);
12725         }
12726         return ret;
12727 #endif
12728 #endif /* CONFIG_ATTR */
12729 #ifdef TARGET_NR_set_thread_area
12730     case TARGET_NR_set_thread_area:
12731 #if defined(TARGET_MIPS)
12732       cpu_env->active_tc.CP0_UserLocal = arg1;
12733       return 0;
12734 #elif defined(TARGET_I386) && defined(TARGET_ABI32)
12735       return do_set_thread_area(cpu_env, arg1);
12736 #elif defined(TARGET_M68K)
12737       {
12738           TaskState *ts = get_task_state(cpu);
12739           ts->tp_value = arg1;
12740           return 0;
12741       }
12742 #else
12743       return -TARGET_ENOSYS;
12744 #endif
12745 #endif
12746 #ifdef TARGET_NR_get_thread_area
12747     case TARGET_NR_get_thread_area:
12748 #if defined(TARGET_I386) && defined(TARGET_ABI32)
12749         return do_get_thread_area(cpu_env, arg1);
12750 #elif defined(TARGET_M68K)
12751         {
12752             TaskState *ts = get_task_state(cpu);
12753             return ts->tp_value;
12754         }
12755 #else
12756         return -TARGET_ENOSYS;
12757 #endif
12758 #endif
12759 #ifdef TARGET_NR_getdomainname
12760     case TARGET_NR_getdomainname:
12761         return -TARGET_ENOSYS;
12762 #endif
12763 
12764 #ifdef TARGET_NR_clock_settime
12765     case TARGET_NR_clock_settime:
12766     {
12767         struct timespec ts;
12768 
12769         ret = target_to_host_timespec(&ts, arg2);
12770         if (!is_error(ret)) {
12771             ret = get_errno(clock_settime(arg1, &ts));
12772         }
12773         return ret;
12774     }
12775 #endif
12776 #ifdef TARGET_NR_clock_settime64
12777     case TARGET_NR_clock_settime64:
12778     {
12779         struct timespec ts;
12780 
12781         ret = target_to_host_timespec64(&ts, arg2);
12782         if (!is_error(ret)) {
12783             ret = get_errno(clock_settime(arg1, &ts));
12784         }
12785         return ret;
12786     }
12787 #endif
12788 #ifdef TARGET_NR_clock_gettime
12789     case TARGET_NR_clock_gettime:
12790     {
12791         struct timespec ts;
12792         ret = get_errno(clock_gettime(arg1, &ts));
12793         if (!is_error(ret)) {
12794             ret = host_to_target_timespec(arg2, &ts);
12795         }
12796         return ret;
12797     }
12798 #endif
12799 #ifdef TARGET_NR_clock_gettime64
12800     case TARGET_NR_clock_gettime64:
12801     {
12802         struct timespec ts;
12803         ret = get_errno(clock_gettime(arg1, &ts));
12804         if (!is_error(ret)) {
12805             ret = host_to_target_timespec64(arg2, &ts);
12806         }
12807         return ret;
12808     }
12809 #endif
12810 #ifdef TARGET_NR_clock_getres
12811     case TARGET_NR_clock_getres:
12812     {
12813         struct timespec ts;
12814         ret = get_errno(clock_getres(arg1, &ts));
12815         if (!is_error(ret)) {
12816             host_to_target_timespec(arg2, &ts);
12817         }
12818         return ret;
12819     }
12820 #endif
12821 #ifdef TARGET_NR_clock_getres_time64
12822     case TARGET_NR_clock_getres_time64:
12823     {
12824         struct timespec ts;
12825         ret = get_errno(clock_getres(arg1, &ts));
12826         if (!is_error(ret)) {
12827             host_to_target_timespec64(arg2, &ts);
12828         }
12829         return ret;
12830     }
12831 #endif
12832 #ifdef TARGET_NR_clock_nanosleep
12833     case TARGET_NR_clock_nanosleep:
12834     {
12835         struct timespec ts;
12836         if (target_to_host_timespec(&ts, arg3)) {
12837             return -TARGET_EFAULT;
12838         }
12839         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12840                                              &ts, arg4 ? &ts : NULL));
12841         /*
12842          * if the call is interrupted by a signal handler, it fails
12843          * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
12844          * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
12845          */
12846         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12847             host_to_target_timespec(arg4, &ts)) {
12848               return -TARGET_EFAULT;
12849         }
12850 
12851         return ret;
12852     }
12853 #endif
12854 #ifdef TARGET_NR_clock_nanosleep_time64
12855     case TARGET_NR_clock_nanosleep_time64:
12856     {
12857         struct timespec ts;
12858 
12859         if (target_to_host_timespec64(&ts, arg3)) {
12860             return -TARGET_EFAULT;
12861         }
12862 
12863         ret = get_errno(safe_clock_nanosleep(arg1, arg2,
12864                                              &ts, arg4 ? &ts : NULL));
12865 
12866         if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
12867             host_to_target_timespec64(arg4, &ts)) {
12868             return -TARGET_EFAULT;
12869         }
12870         return ret;
12871     }
12872 #endif
12873 
12874 #if defined(TARGET_NR_set_tid_address)
12875     case TARGET_NR_set_tid_address:
12876     {
12877         TaskState *ts = get_task_state(cpu);
12878         ts->child_tidptr = arg1;
12879         /* do not call host set_tid_address() syscall, instead return tid() */
12880         return get_errno(sys_gettid());
12881     }
12882 #endif
12883 
12884     case TARGET_NR_tkill:
12885         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12886 
12887     case TARGET_NR_tgkill:
12888         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12889                          target_to_host_signal(arg3)));
12890 
12891 #ifdef TARGET_NR_set_robust_list
12892     case TARGET_NR_set_robust_list:
12893     case TARGET_NR_get_robust_list:
12894         /* The ABI for supporting robust futexes has userspace pass
12895          * the kernel a pointer to a linked list which is updated by
12896          * userspace after the syscall; the list is walked by the kernel
12897          * when the thread exits. Since the linked list in QEMU guest
12898          * memory isn't a valid linked list for the host and we have
12899          * no way to reliably intercept the thread-death event, we can't
12900          * support these. Silently return ENOSYS so that guest userspace
12901          * falls back to a non-robust futex implementation (which should
12902          * be OK except in the corner case of the guest crashing while
12903          * holding a mutex that is shared with another process via
12904          * shared memory).
12905          */
12906         return -TARGET_ENOSYS;
12907 #endif
12908 
12909 #if defined(TARGET_NR_utimensat)
12910     case TARGET_NR_utimensat:
12911         {
12912             struct timespec *tsp, ts[2];
12913             if (!arg3) {
12914                 tsp = NULL;
12915             } else {
12916                 if (target_to_host_timespec(ts, arg3)) {
12917                     return -TARGET_EFAULT;
12918                 }
12919                 if (target_to_host_timespec(ts + 1, arg3 +
12920                                             sizeof(struct target_timespec))) {
12921                     return -TARGET_EFAULT;
12922                 }
12923                 tsp = ts;
12924             }
12925             if (!arg2)
12926                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12927             else {
12928                 if (!(p = lock_user_string(arg2))) {
12929                     return -TARGET_EFAULT;
12930                 }
12931                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12932                 unlock_user(p, arg2, 0);
12933             }
12934         }
12935         return ret;
12936 #endif
12937 #ifdef TARGET_NR_utimensat_time64
12938     case TARGET_NR_utimensat_time64:
12939         {
12940             struct timespec *tsp, ts[2];
12941             if (!arg3) {
12942                 tsp = NULL;
12943             } else {
12944                 if (target_to_host_timespec64(ts, arg3)) {
12945                     return -TARGET_EFAULT;
12946                 }
12947                 if (target_to_host_timespec64(ts + 1, arg3 +
12948                                      sizeof(struct target__kernel_timespec))) {
12949                     return -TARGET_EFAULT;
12950                 }
12951                 tsp = ts;
12952             }
12953             if (!arg2)
12954                 ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
12955             else {
12956                 p = lock_user_string(arg2);
12957                 if (!p) {
12958                     return -TARGET_EFAULT;
12959                 }
12960                 ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
12961                 unlock_user(p, arg2, 0);
12962             }
12963         }
12964         return ret;
12965 #endif
12966 #ifdef TARGET_NR_futex
12967     case TARGET_NR_futex:
12968         return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
12969 #endif
12970 #ifdef TARGET_NR_futex_time64
12971     case TARGET_NR_futex_time64:
12972         return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
12973 #endif
12974 #ifdef CONFIG_INOTIFY
12975 #if defined(TARGET_NR_inotify_init)
12976     case TARGET_NR_inotify_init:
12977         ret = get_errno(inotify_init());
12978         if (ret >= 0) {
12979             fd_trans_register(ret, &target_inotify_trans);
12980         }
12981         return ret;
12982 #endif
12983 #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
12984     case TARGET_NR_inotify_init1:
12985         ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
12986                                           fcntl_flags_tbl)));
12987         if (ret >= 0) {
12988             fd_trans_register(ret, &target_inotify_trans);
12989         }
12990         return ret;
12991 #endif
12992 #if defined(TARGET_NR_inotify_add_watch)
12993     case TARGET_NR_inotify_add_watch:
12994         p = lock_user_string(arg2);
12995         ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
12996         unlock_user(p, arg2, 0);
12997         return ret;
12998 #endif
12999 #if defined(TARGET_NR_inotify_rm_watch)
13000     case TARGET_NR_inotify_rm_watch:
13001         return get_errno(inotify_rm_watch(arg1, arg2));
13002 #endif
13003 #endif
13004 
13005 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
13006     case TARGET_NR_mq_open:
13007         {
13008             struct mq_attr posix_mq_attr;
13009             struct mq_attr *pposix_mq_attr;
13010             int host_flags;
13011 
13012             host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
13013             pposix_mq_attr = NULL;
13014             if (arg4) {
13015                 if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
13016                     return -TARGET_EFAULT;
13017                 }
13018                 pposix_mq_attr = &posix_mq_attr;
13019             }
13020             p = lock_user_string(arg1 - 1);
13021             if (!p) {
13022                 return -TARGET_EFAULT;
13023             }
13024             ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
13025             unlock_user (p, arg1, 0);
13026         }
13027         return ret;
13028 
13029     case TARGET_NR_mq_unlink:
13030         p = lock_user_string(arg1 - 1);
13031         if (!p) {
13032             return -TARGET_EFAULT;
13033         }
13034         ret = get_errno(mq_unlink(p));
13035         unlock_user (p, arg1, 0);
13036         return ret;
13037 
13038 #ifdef TARGET_NR_mq_timedsend
13039     case TARGET_NR_mq_timedsend:
13040         {
13041             struct timespec ts;
13042 
13043             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13044             if (arg5 != 0) {
13045                 if (target_to_host_timespec(&ts, arg5)) {
13046                     return -TARGET_EFAULT;
13047                 }
13048                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13049                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13050                     return -TARGET_EFAULT;
13051                 }
13052             } else {
13053                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13054             }
13055             unlock_user (p, arg2, arg3);
13056         }
13057         return ret;
13058 #endif
13059 #ifdef TARGET_NR_mq_timedsend_time64
13060     case TARGET_NR_mq_timedsend_time64:
13061         {
13062             struct timespec ts;
13063 
13064             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13065             if (arg5 != 0) {
13066                 if (target_to_host_timespec64(&ts, arg5)) {
13067                     return -TARGET_EFAULT;
13068                 }
13069                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
13070                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13071                     return -TARGET_EFAULT;
13072                 }
13073             } else {
13074                 ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
13075             }
13076             unlock_user(p, arg2, arg3);
13077         }
13078         return ret;
13079 #endif
13080 
13081 #ifdef TARGET_NR_mq_timedreceive
13082     case TARGET_NR_mq_timedreceive:
13083         {
13084             struct timespec ts;
13085             unsigned int prio;
13086 
13087             p = lock_user (VERIFY_READ, arg2, arg3, 1);
13088             if (arg5 != 0) {
13089                 if (target_to_host_timespec(&ts, arg5)) {
13090                     return -TARGET_EFAULT;
13091                 }
13092                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13093                                                      &prio, &ts));
13094                 if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
13095                     return -TARGET_EFAULT;
13096                 }
13097             } else {
13098                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13099                                                      &prio, NULL));
13100             }
13101             unlock_user (p, arg2, arg3);
13102             if (arg4 != 0)
13103                 put_user_u32(prio, arg4);
13104         }
13105         return ret;
13106 #endif
13107 #ifdef TARGET_NR_mq_timedreceive_time64
13108     case TARGET_NR_mq_timedreceive_time64:
13109         {
13110             struct timespec ts;
13111             unsigned int prio;
13112 
13113             p = lock_user(VERIFY_READ, arg2, arg3, 1);
13114             if (arg5 != 0) {
13115                 if (target_to_host_timespec64(&ts, arg5)) {
13116                     return -TARGET_EFAULT;
13117                 }
13118                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13119                                                      &prio, &ts));
13120                 if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
13121                     return -TARGET_EFAULT;
13122                 }
13123             } else {
13124                 ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
13125                                                      &prio, NULL));
13126             }
13127             unlock_user(p, arg2, arg3);
13128             if (arg4 != 0) {
13129                 put_user_u32(prio, arg4);
13130             }
13131         }
13132         return ret;
13133 #endif
13134 
13135     /* Not implemented for now... */
13136 /*     case TARGET_NR_mq_notify: */
13137 /*         break; */
13138 
13139     case TARGET_NR_mq_getsetattr:
13140         {
13141             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13142             ret = 0;
13143             if (arg2 != 0) {
13144                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13145                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13146                                            &posix_mq_attr_out));
13147             } else if (arg3 != 0) {
13148                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13149             }
13150             if (ret == 0 && arg3 != 0) {
13151                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13152             }
13153         }
13154         return ret;
13155 #endif
13156 
13157 #ifdef CONFIG_SPLICE
13158 #ifdef TARGET_NR_tee
13159     case TARGET_NR_tee:
13160         {
13161             ret = get_errno(tee(arg1,arg2,arg3,arg4));
13162         }
13163         return ret;
13164 #endif
13165 #ifdef TARGET_NR_splice
13166     case TARGET_NR_splice:
13167         {
13168             loff_t loff_in, loff_out;
13169             loff_t *ploff_in = NULL, *ploff_out = NULL;
13170             if (arg2) {
13171                 if (get_user_u64(loff_in, arg2)) {
13172                     return -TARGET_EFAULT;
13173                 }
13174                 ploff_in = &loff_in;
13175             }
13176             if (arg4) {
13177                 if (get_user_u64(loff_out, arg4)) {
13178                     return -TARGET_EFAULT;
13179                 }
13180                 ploff_out = &loff_out;
13181             }
13182             ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
13183             if (arg2) {
13184                 if (put_user_u64(loff_in, arg2)) {
13185                     return -TARGET_EFAULT;
13186                 }
13187             }
13188             if (arg4) {
13189                 if (put_user_u64(loff_out, arg4)) {
13190                     return -TARGET_EFAULT;
13191                 }
13192             }
13193         }
13194         return ret;
13195 #endif
13196 #ifdef TARGET_NR_vmsplice
13197 	case TARGET_NR_vmsplice:
13198         {
13199             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13200             if (vec != NULL) {
13201                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13202                 unlock_iovec(vec, arg2, arg3, 0);
13203             } else {
13204                 ret = -host_to_target_errno(errno);
13205             }
13206         }
13207         return ret;
13208 #endif
13209 #endif /* CONFIG_SPLICE */
13210 #ifdef CONFIG_EVENTFD
13211 #if defined(TARGET_NR_eventfd)
13212     case TARGET_NR_eventfd:
13213         ret = get_errno(eventfd(arg1, 0));
13214         if (ret >= 0) {
13215             fd_trans_register(ret, &target_eventfd_trans);
13216         }
13217         return ret;
13218 #endif
13219 #if defined(TARGET_NR_eventfd2)
13220     case TARGET_NR_eventfd2:
13221     {
13222         int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
13223         if (arg2 & TARGET_O_NONBLOCK) {
13224             host_flags |= O_NONBLOCK;
13225         }
13226         if (arg2 & TARGET_O_CLOEXEC) {
13227             host_flags |= O_CLOEXEC;
13228         }
13229         ret = get_errno(eventfd(arg1, host_flags));
13230         if (ret >= 0) {
13231             fd_trans_register(ret, &target_eventfd_trans);
13232         }
13233         return ret;
13234     }
13235 #endif
13236 #endif /* CONFIG_EVENTFD  */
13237 #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
13238     case TARGET_NR_fallocate:
13239 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13240         ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
13241                                   target_offset64(arg5, arg6)));
13242 #else
13243         ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
13244 #endif
13245         return ret;
13246 #endif
13247 #if defined(CONFIG_SYNC_FILE_RANGE)
13248 #if defined(TARGET_NR_sync_file_range)
13249     case TARGET_NR_sync_file_range:
13250 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13251 #if defined(TARGET_MIPS)
13252         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13253                                         target_offset64(arg5, arg6), arg7));
13254 #else
13255         ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
13256                                         target_offset64(arg4, arg5), arg6));
13257 #endif /* !TARGET_MIPS */
13258 #else
13259         ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
13260 #endif
13261         return ret;
13262 #endif
13263 #if defined(TARGET_NR_sync_file_range2) || \
13264     defined(TARGET_NR_arm_sync_file_range)
13265 #if defined(TARGET_NR_sync_file_range2)
13266     case TARGET_NR_sync_file_range2:
13267 #endif
13268 #if defined(TARGET_NR_arm_sync_file_range)
13269     case TARGET_NR_arm_sync_file_range:
13270 #endif
13271         /* This is like sync_file_range but the arguments are reordered */
13272 #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
13273         ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
13274                                         target_offset64(arg5, arg6), arg2));
13275 #else
13276         ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
13277 #endif
13278         return ret;
13279 #endif
13280 #endif
13281 #if defined(TARGET_NR_signalfd4)
13282     case TARGET_NR_signalfd4:
13283         return do_signalfd4(arg1, arg2, arg4);
13284 #endif
13285 #if defined(TARGET_NR_signalfd)
13286     case TARGET_NR_signalfd:
13287         return do_signalfd4(arg1, arg2, 0);
13288 #endif
13289 #if defined(CONFIG_EPOLL)
13290 #if defined(TARGET_NR_epoll_create)
13291     case TARGET_NR_epoll_create:
13292         return get_errno(epoll_create(arg1));
13293 #endif
13294 #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
13295     case TARGET_NR_epoll_create1:
13296         return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
13297 #endif
13298 #if defined(TARGET_NR_epoll_ctl)
13299     case TARGET_NR_epoll_ctl:
13300     {
13301         struct epoll_event ep;
13302         struct epoll_event *epp = 0;
13303         if (arg4) {
13304             if (arg2 != EPOLL_CTL_DEL) {
13305                 struct target_epoll_event *target_ep;
13306                 if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
13307                     return -TARGET_EFAULT;
13308                 }
13309                 ep.events = tswap32(target_ep->events);
13310                 /*
13311                  * The epoll_data_t union is just opaque data to the kernel,
13312                  * so we transfer all 64 bits across and need not worry what
13313                  * actual data type it is.
13314                  */
13315                 ep.data.u64 = tswap64(target_ep->data.u64);
13316                 unlock_user_struct(target_ep, arg4, 0);
13317             }
13318             /*
13319              * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
13320              * non-null pointer, even though this argument is ignored.
13321              *
13322              */
13323             epp = &ep;
13324         }
13325         return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
13326     }
13327 #endif
13328 
13329 #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
13330 #if defined(TARGET_NR_epoll_wait)
13331     case TARGET_NR_epoll_wait:
13332 #endif
13333 #if defined(TARGET_NR_epoll_pwait)
13334     case TARGET_NR_epoll_pwait:
13335 #endif
13336     {
13337         struct target_epoll_event *target_ep;
13338         struct epoll_event *ep;
13339         int epfd = arg1;
13340         int maxevents = arg3;
13341         int timeout = arg4;
13342 
13343         if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
13344             return -TARGET_EINVAL;
13345         }
13346 
13347         target_ep = lock_user(VERIFY_WRITE, arg2,
13348                               maxevents * sizeof(struct target_epoll_event), 1);
13349         if (!target_ep) {
13350             return -TARGET_EFAULT;
13351         }
13352 
13353         ep = g_try_new(struct epoll_event, maxevents);
13354         if (!ep) {
13355             unlock_user(target_ep, arg2, 0);
13356             return -TARGET_ENOMEM;
13357         }
13358 
13359         switch (num) {
13360 #if defined(TARGET_NR_epoll_pwait)
13361         case TARGET_NR_epoll_pwait:
13362         {
13363             sigset_t *set = NULL;
13364 
13365             if (arg5) {
13366                 ret = process_sigsuspend_mask(&set, arg5, arg6);
13367                 if (ret != 0) {
13368                     break;
13369                 }
13370             }
13371 
13372             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13373                                              set, SIGSET_T_SIZE));
13374 
13375             if (set) {
13376                 finish_sigsuspend_mask(ret);
13377             }
13378             break;
13379         }
13380 #endif
13381 #if defined(TARGET_NR_epoll_wait)
13382         case TARGET_NR_epoll_wait:
13383             ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
13384                                              NULL, 0));
13385             break;
13386 #endif
13387         default:
13388             ret = -TARGET_ENOSYS;
13389         }
13390         if (!is_error(ret)) {
13391             int i;
13392             for (i = 0; i < ret; i++) {
13393                 target_ep[i].events = tswap32(ep[i].events);
13394                 target_ep[i].data.u64 = tswap64(ep[i].data.u64);
13395             }
13396             unlock_user(target_ep, arg2,
13397                         ret * sizeof(struct target_epoll_event));
13398         } else {
13399             unlock_user(target_ep, arg2, 0);
13400         }
13401         g_free(ep);
13402         return ret;
13403     }
13404 #endif
13405 #endif
13406 #ifdef TARGET_NR_prlimit64
13407     case TARGET_NR_prlimit64:
13408     {
13409         /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
13410         struct target_rlimit64 *target_rnew, *target_rold;
13411         struct host_rlimit64 rnew, rold, *rnewp = 0;
13412         int resource = target_to_host_resource(arg2);
13413 
13414         if (arg3 && (resource != RLIMIT_AS &&
13415                      resource != RLIMIT_DATA &&
13416                      resource != RLIMIT_STACK)) {
13417             if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
13418                 return -TARGET_EFAULT;
13419             }
13420             __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
13421             __get_user(rnew.rlim_max, &target_rnew->rlim_max);
13422             unlock_user_struct(target_rnew, arg3, 0);
13423             rnewp = &rnew;
13424         }
13425 
13426         ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
13427         if (!is_error(ret) && arg4) {
13428             if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
13429                 return -TARGET_EFAULT;
13430             }
13431             __put_user(rold.rlim_cur, &target_rold->rlim_cur);
13432             __put_user(rold.rlim_max, &target_rold->rlim_max);
13433             unlock_user_struct(target_rold, arg4, 1);
13434         }
13435         return ret;
13436     }
13437 #endif
13438 #ifdef TARGET_NR_gethostname
13439     case TARGET_NR_gethostname:
13440     {
13441         char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
13442         if (name) {
13443             ret = get_errno(gethostname(name, arg2));
13444             unlock_user(name, arg1, arg2);
13445         } else {
13446             ret = -TARGET_EFAULT;
13447         }
13448         return ret;
13449     }
13450 #endif
13451 #ifdef TARGET_NR_atomic_cmpxchg_32
13452     case TARGET_NR_atomic_cmpxchg_32:
13453     {
13454         /* should use start_exclusive from main.c */
13455         abi_ulong mem_value;
13456         if (get_user_u32(mem_value, arg6)) {
13457             target_siginfo_t info;
13458             info.si_signo = SIGSEGV;
13459             info.si_errno = 0;
13460             info.si_code = TARGET_SEGV_MAPERR;
13461             info._sifields._sigfault._addr = arg6;
13462             queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
13463             ret = 0xdeadbeef;
13464 
13465         }
13466         if (mem_value == arg2)
13467             put_user_u32(arg1, arg6);
13468         return mem_value;
13469     }
13470 #endif
13471 #ifdef TARGET_NR_atomic_barrier
13472     case TARGET_NR_atomic_barrier:
13473         /* Like the kernel implementation and the
13474            qemu arm barrier, no-op this? */
13475         return 0;
13476 #endif
13477 
13478 #ifdef TARGET_NR_timer_create
13479     case TARGET_NR_timer_create:
13480     {
13481         /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
13482 
13483         struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
13484 
13485         int clkid = arg1;
13486         int timer_index = next_free_host_timer();
13487 
13488         if (timer_index < 0) {
13489             ret = -TARGET_EAGAIN;
13490         } else {
13491             timer_t *phtimer = g_posix_timers  + timer_index;
13492 
13493             if (arg2) {
13494                 phost_sevp = &host_sevp;
13495                 ret = target_to_host_sigevent(phost_sevp, arg2);
13496                 if (ret != 0) {
13497                     free_host_timer_slot(timer_index);
13498                     return ret;
13499                 }
13500             }
13501 
13502             ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
13503             if (ret) {
13504                 free_host_timer_slot(timer_index);
13505             } else {
13506                 if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
13507                     timer_delete(*phtimer);
13508                     free_host_timer_slot(timer_index);
13509                     return -TARGET_EFAULT;
13510                 }
13511             }
13512         }
13513         return ret;
13514     }
13515 #endif
13516 
13517 #ifdef TARGET_NR_timer_settime
13518     case TARGET_NR_timer_settime:
13519     {
13520         /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
13521          * struct itimerspec * old_value */
13522         target_timer_t timerid = get_timer_id(arg1);
13523 
13524         if (timerid < 0) {
13525             ret = timerid;
13526         } else if (arg3 == 0) {
13527             ret = -TARGET_EINVAL;
13528         } else {
13529             timer_t htimer = g_posix_timers[timerid];
13530             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13531 
13532             if (target_to_host_itimerspec(&hspec_new, arg3)) {
13533                 return -TARGET_EFAULT;
13534             }
13535             ret = get_errno(
13536                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13537             if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
13538                 return -TARGET_EFAULT;
13539             }
13540         }
13541         return ret;
13542     }
13543 #endif
13544 
13545 #ifdef TARGET_NR_timer_settime64
13546     case TARGET_NR_timer_settime64:
13547     {
13548         target_timer_t timerid = get_timer_id(arg1);
13549 
13550         if (timerid < 0) {
13551             ret = timerid;
13552         } else if (arg3 == 0) {
13553             ret = -TARGET_EINVAL;
13554         } else {
13555             timer_t htimer = g_posix_timers[timerid];
13556             struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
13557 
13558             if (target_to_host_itimerspec64(&hspec_new, arg3)) {
13559                 return -TARGET_EFAULT;
13560             }
13561             ret = get_errno(
13562                           timer_settime(htimer, arg2, &hspec_new, &hspec_old));
13563             if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
13564                 return -TARGET_EFAULT;
13565             }
13566         }
13567         return ret;
13568     }
13569 #endif
13570 
13571 #ifdef TARGET_NR_timer_gettime
13572     case TARGET_NR_timer_gettime:
13573     {
13574         /* args: timer_t timerid, struct itimerspec *curr_value */
13575         target_timer_t timerid = get_timer_id(arg1);
13576 
13577         if (timerid < 0) {
13578             ret = timerid;
13579         } else if (!arg2) {
13580             ret = -TARGET_EFAULT;
13581         } else {
13582             timer_t htimer = g_posix_timers[timerid];
13583             struct itimerspec hspec;
13584             ret = get_errno(timer_gettime(htimer, &hspec));
13585 
13586             if (host_to_target_itimerspec(arg2, &hspec)) {
13587                 ret = -TARGET_EFAULT;
13588             }
13589         }
13590         return ret;
13591     }
13592 #endif
13593 
13594 #ifdef TARGET_NR_timer_gettime64
13595     case TARGET_NR_timer_gettime64:
13596     {
13597         /* args: timer_t timerid, struct itimerspec64 *curr_value */
13598         target_timer_t timerid = get_timer_id(arg1);
13599 
13600         if (timerid < 0) {
13601             ret = timerid;
13602         } else if (!arg2) {
13603             ret = -TARGET_EFAULT;
13604         } else {
13605             timer_t htimer = g_posix_timers[timerid];
13606             struct itimerspec hspec;
13607             ret = get_errno(timer_gettime(htimer, &hspec));
13608 
13609             if (host_to_target_itimerspec64(arg2, &hspec)) {
13610                 ret = -TARGET_EFAULT;
13611             }
13612         }
13613         return ret;
13614     }
13615 #endif
13616 
13617 #ifdef TARGET_NR_timer_getoverrun
13618     case TARGET_NR_timer_getoverrun:
13619     {
13620         /* args: timer_t timerid */
13621         target_timer_t timerid = get_timer_id(arg1);
13622 
13623         if (timerid < 0) {
13624             ret = timerid;
13625         } else {
13626             timer_t htimer = g_posix_timers[timerid];
13627             ret = get_errno(timer_getoverrun(htimer));
13628         }
13629         return ret;
13630     }
13631 #endif
13632 
13633 #ifdef TARGET_NR_timer_delete
13634     case TARGET_NR_timer_delete:
13635     {
13636         /* args: timer_t timerid */
13637         target_timer_t timerid = get_timer_id(arg1);
13638 
13639         if (timerid < 0) {
13640             ret = timerid;
13641         } else {
13642             timer_t htimer = g_posix_timers[timerid];
13643             ret = get_errno(timer_delete(htimer));
13644             free_host_timer_slot(timerid);
13645         }
13646         return ret;
13647     }
13648 #endif
13649 
13650 #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
13651     case TARGET_NR_timerfd_create:
13652         ret = get_errno(timerfd_create(arg1,
13653                         target_to_host_bitmask(arg2, fcntl_flags_tbl)));
13654         if (ret >= 0) {
13655             fd_trans_register(ret, &target_timerfd_trans);
13656         }
13657         return ret;
13658 #endif
13659 
13660 #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
13661     case TARGET_NR_timerfd_gettime:
13662         {
13663             struct itimerspec its_curr;
13664 
13665             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13666 
13667             if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
13668                 return -TARGET_EFAULT;
13669             }
13670         }
13671         return ret;
13672 #endif
13673 
13674 #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
13675     case TARGET_NR_timerfd_gettime64:
13676         {
13677             struct itimerspec its_curr;
13678 
13679             ret = get_errno(timerfd_gettime(arg1, &its_curr));
13680 
13681             if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
13682                 return -TARGET_EFAULT;
13683             }
13684         }
13685         return ret;
13686 #endif
13687 
13688 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13689     case TARGET_NR_timerfd_settime:
13690         {
13691             struct itimerspec its_new, its_old, *p_new;
13692 
13693             if (arg3) {
13694                 if (target_to_host_itimerspec(&its_new, arg3)) {
13695                     return -TARGET_EFAULT;
13696                 }
13697                 p_new = &its_new;
13698             } else {
13699                 p_new = NULL;
13700             }
13701 
13702             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13703 
13704             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13705                 return -TARGET_EFAULT;
13706             }
13707         }
13708         return ret;
13709 #endif
13710 
13711 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13712     case TARGET_NR_timerfd_settime64:
13713         {
13714             struct itimerspec its_new, its_old, *p_new;
13715 
13716             if (arg3) {
13717                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13718                     return -TARGET_EFAULT;
13719                 }
13720                 p_new = &its_new;
13721             } else {
13722                 p_new = NULL;
13723             }
13724 
13725             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13726 
13727             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13728                 return -TARGET_EFAULT;
13729             }
13730         }
13731         return ret;
13732 #endif
13733 
13734 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13735     case TARGET_NR_ioprio_get:
13736         return get_errno(ioprio_get(arg1, arg2));
13737 #endif
13738 
13739 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13740     case TARGET_NR_ioprio_set:
13741         return get_errno(ioprio_set(arg1, arg2, arg3));
13742 #endif
13743 
13744 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13745     case TARGET_NR_setns:
13746         return get_errno(setns(arg1, arg2));
13747 #endif
13748 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13749     case TARGET_NR_unshare:
13750         return get_errno(unshare(arg1));
13751 #endif
13752 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13753     case TARGET_NR_kcmp:
13754         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13755 #endif
13756 #ifdef TARGET_NR_swapcontext
13757     case TARGET_NR_swapcontext:
13758         /* PowerPC specific.  */
13759         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13760 #endif
13761 #ifdef TARGET_NR_memfd_create
13762     case TARGET_NR_memfd_create:
13763         p = lock_user_string(arg1);
13764         if (!p) {
13765             return -TARGET_EFAULT;
13766         }
13767         ret = get_errno(memfd_create(p, arg2));
13768         fd_trans_unregister(ret);
13769         unlock_user(p, arg1, 0);
13770         return ret;
13771 #endif
13772 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13773     case TARGET_NR_membarrier:
13774         return get_errno(membarrier(arg1, arg2));
13775 #endif
13776 
13777 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13778     case TARGET_NR_copy_file_range:
13779         {
13780             loff_t inoff, outoff;
13781             loff_t *pinoff = NULL, *poutoff = NULL;
13782 
13783             if (arg2) {
13784                 if (get_user_u64(inoff, arg2)) {
13785                     return -TARGET_EFAULT;
13786                 }
13787                 pinoff = &inoff;
13788             }
13789             if (arg4) {
13790                 if (get_user_u64(outoff, arg4)) {
13791                     return -TARGET_EFAULT;
13792                 }
13793                 poutoff = &outoff;
13794             }
13795             /* Do not sign-extend the count parameter. */
13796             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13797                                                  (abi_ulong)arg5, arg6));
13798             if (!is_error(ret) && ret > 0) {
13799                 if (arg2) {
13800                     if (put_user_u64(inoff, arg2)) {
13801                         return -TARGET_EFAULT;
13802                     }
13803                 }
13804                 if (arg4) {
13805                     if (put_user_u64(outoff, arg4)) {
13806                         return -TARGET_EFAULT;
13807                     }
13808                 }
13809             }
13810         }
13811         return ret;
13812 #endif
13813 
13814 #if defined(TARGET_NR_pivot_root)
13815     case TARGET_NR_pivot_root:
13816         {
13817             void *p2;
13818             p = lock_user_string(arg1); /* new_root */
13819             p2 = lock_user_string(arg2); /* put_old */
13820             if (!p || !p2) {
13821                 ret = -TARGET_EFAULT;
13822             } else {
13823                 ret = get_errno(pivot_root(p, p2));
13824             }
13825             unlock_user(p2, arg2, 0);
13826             unlock_user(p, arg1, 0);
13827         }
13828         return ret;
13829 #endif
13830 
13831 #if defined(TARGET_NR_riscv_hwprobe)
13832     case TARGET_NR_riscv_hwprobe:
13833         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13834 #endif
13835 
13836     default:
13837         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13838         return -TARGET_ENOSYS;
13839     }
13840     return ret;
13841 }
13842 
13843 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13844                     abi_long arg2, abi_long arg3, abi_long arg4,
13845                     abi_long arg5, abi_long arg6, abi_long arg7,
13846                     abi_long arg8)
13847 {
13848     CPUState *cpu = env_cpu(cpu_env);
13849     abi_long ret;
13850 
13851 #ifdef DEBUG_ERESTARTSYS
13852     /* Debug-only code for exercising the syscall-restart code paths
13853      * in the per-architecture cpu main loops: restart every syscall
13854      * the guest makes once before letting it through.
13855      */
13856     {
13857         static bool flag;
13858         flag = !flag;
13859         if (flag) {
13860             return -QEMU_ERESTARTSYS;
13861         }
13862     }
13863 #endif
13864 
13865     record_syscall_start(cpu, num, arg1,
13866                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13867 
13868     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13869         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13870     }
13871 
13872     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13873                       arg5, arg6, arg7, arg8);
13874 
13875     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13876         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13877                           arg3, arg4, arg5, arg6);
13878     }
13879 
13880     record_syscall_return(cpu, num, ret);
13881     return ret;
13882 }
13883