xref: /openbmc/qemu/linux-user/syscall.c (revision 91748d50c7ef4addcc9302160a4b8b3c63d5d024)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "tcg/startup.h"
27 #include "target_mman.h"
28 #include "exec/page-protection.h"
29 #include "exec/mmap-lock.h"
30 #include "exec/tb-flush.h"
31 #include "exec/translation-block.h"
32 #include <elf.h>
33 #include <endian.h>
34 #include <grp.h>
35 #include <sys/ipc.h>
36 #include <sys/msg.h>
37 #include <sys/wait.h>
38 #include <sys/mount.h>
39 #include <sys/file.h>
40 #include <sys/fsuid.h>
41 #include <sys/personality.h>
42 #include <sys/prctl.h>
43 #include <sys/resource.h>
44 #include <sys/swap.h>
45 #include <linux/capability.h>
46 #include <sched.h>
47 #include <sys/timex.h>
48 #include <sys/socket.h>
49 #include <linux/sockios.h>
50 #include <sys/un.h>
51 #include <sys/uio.h>
52 #include <poll.h>
53 #include <sys/times.h>
54 #include <sys/shm.h>
55 #include <sys/sem.h>
56 #include <sys/statfs.h>
57 #include <utime.h>
58 #include <sys/sysinfo.h>
59 #include <sys/signalfd.h>
60 #include <netinet/in.h>
61 #include <netinet/ip.h>
62 #include <netinet/tcp.h>
63 #include <netinet/udp.h>
64 #include <linux/wireless.h>
65 #include <linux/icmp.h>
66 #include <linux/icmpv6.h>
67 #include <linux/if_tun.h>
68 #include <linux/in6.h>
69 #include <linux/errqueue.h>
70 #include <linux/random.h>
71 #ifdef CONFIG_TIMERFD
72 #include <sys/timerfd.h>
73 #endif
74 #ifdef CONFIG_EVENTFD
75 #include <sys/eventfd.h>
76 #endif
77 #ifdef CONFIG_EPOLL
78 #include <sys/epoll.h>
79 #endif
80 #ifdef CONFIG_ATTR
81 #include "qemu/xattr.h"
82 #endif
83 #ifdef CONFIG_SENDFILE
84 #include <sys/sendfile.h>
85 #endif
86 #ifdef HAVE_SYS_KCOV_H
87 #include <sys/kcov.h>
88 #endif
89 
90 #define termios host_termios
91 #define winsize host_winsize
92 #define termio host_termio
93 #define sgttyb host_sgttyb /* same as target */
94 #define tchars host_tchars /* same as target */
95 #define ltchars host_ltchars /* same as target */
96 
97 #include <linux/termios.h>
98 #include <linux/unistd.h>
99 #include <linux/cdrom.h>
100 #include <linux/hdreg.h>
101 #include <linux/soundcard.h>
102 #include <linux/kd.h>
103 #include <linux/mtio.h>
104 #include <linux/fs.h>
105 #include <linux/fd.h>
106 #if defined(CONFIG_FIEMAP)
107 #include <linux/fiemap.h>
108 #endif
109 #include <linux/fb.h>
110 #if defined(CONFIG_USBFS)
111 #include <linux/usbdevice_fs.h>
112 #include <linux/usb/ch9.h>
113 #endif
114 #include <linux/vt.h>
115 #include <linux/dm-ioctl.h>
116 #include <linux/reboot.h>
117 #include <linux/route.h>
118 #include <linux/filter.h>
119 #include <linux/blkpg.h>
120 #include <netpacket/packet.h>
121 #include <linux/netlink.h>
122 #include <linux/if_alg.h>
123 #include <linux/rtc.h>
124 #include <sound/asound.h>
125 #ifdef HAVE_BTRFS_H
126 #include <linux/btrfs.h>
127 #endif
128 #ifdef HAVE_DRM_H
129 #include <libdrm/drm.h>
130 #include <libdrm/i915_drm.h>
131 #endif
132 #include "linux_loop.h"
133 #include "uname.h"
134 
135 #include "qemu.h"
136 #include "user-internals.h"
137 #include "strace.h"
138 #include "signal-common.h"
139 #include "loader.h"
140 #include "user-mmap.h"
141 #include "user/page-protection.h"
142 #include "user/safe-syscall.h"
143 #include "user/signal.h"
144 #include "qemu/guest-random.h"
145 #include "qemu/selfmap.h"
146 #include "user/syscall-trace.h"
147 #include "special-errno.h"
148 #include "qapi/error.h"
149 #include "fd-trans.h"
150 #include "user/cpu_loop.h"
151 
152 #ifndef CLONE_IO
153 #define CLONE_IO                0x80000000      /* Clone io context */
154 #endif
155 
156 /* We can't directly call the host clone syscall, because this will
157  * badly confuse libc (breaking mutexes, for example). So we must
158  * divide clone flags into:
159  *  * flag combinations that look like pthread_create()
160  *  * flag combinations that look like fork()
161  *  * flags we can implement within QEMU itself
162  *  * flags we can't support and will return an error for
163  */
164 /* For thread creation, all these flags must be present; for
165  * fork, none must be present.
166  */
167 #define CLONE_THREAD_FLAGS                              \
168     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
169      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
170 
171 /* These flags are ignored:
172  * CLONE_DETACHED is now ignored by the kernel;
173  * CLONE_IO is just an optimisation hint to the I/O scheduler
174  */
175 #define CLONE_IGNORED_FLAGS                     \
176     (CLONE_DETACHED | CLONE_IO)
177 
178 #ifndef CLONE_PIDFD
179 # define CLONE_PIDFD 0x00001000
180 #endif
181 
182 /* Flags for fork which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_FORK_FLAGS               \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
186 
187 /* Flags for thread creation which we can implement within QEMU itself */
188 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
189     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
190      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
191 
192 #define CLONE_INVALID_FORK_FLAGS                                        \
193     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
194 
195 #define CLONE_INVALID_THREAD_FLAGS                                      \
196     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
197        CLONE_IGNORED_FLAGS))
198 
199 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
200  * have almost all been allocated. We cannot support any of
201  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
202  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
203  * The checks against the invalid thread masks above will catch these.
204  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
205  */
206 
207 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
208  * once. This exercises the codepaths for restart.
209  */
210 //#define DEBUG_ERESTARTSYS
211 
212 //#include <linux/msdos_fs.h>
213 #define VFAT_IOCTL_READDIR_BOTH \
214     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
215 #define VFAT_IOCTL_READDIR_SHORT \
216     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
217 
218 #undef _syscall0
219 #undef _syscall1
220 #undef _syscall2
221 #undef _syscall3
222 #undef _syscall4
223 #undef _syscall5
224 #undef _syscall6
225 
226 #define _syscall0(type,name)		\
227 static type name (void)			\
228 {					\
229 	return syscall(__NR_##name);	\
230 }
231 
232 #define _syscall1(type,name,type1,arg1)		\
233 static type name (type1 arg1)			\
234 {						\
235 	return syscall(__NR_##name, arg1);	\
236 }
237 
238 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
239 static type name (type1 arg1,type2 arg2)		\
240 {							\
241 	return syscall(__NR_##name, arg1, arg2);	\
242 }
243 
244 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
245 static type name (type1 arg1,type2 arg2,type3 arg3)		\
246 {								\
247 	return syscall(__NR_##name, arg1, arg2, arg3);		\
248 }
249 
250 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
251 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
252 {										\
253 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
254 }
255 
256 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
257 		  type5,arg5)							\
258 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
259 {										\
260 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
261 }
262 
263 
264 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
265 		  type5,arg5,type6,arg6)					\
266 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
267                   type6 arg6)							\
268 {										\
269 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
270 }
271 
272 
273 #define __NR_sys_uname __NR_uname
274 #define __NR_sys_getcwd1 __NR_getcwd
275 #define __NR_sys_getdents __NR_getdents
276 #define __NR_sys_getdents64 __NR_getdents64
277 #define __NR_sys_getpriority __NR_getpriority
278 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
279 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
280 #define __NR_sys_syslog __NR_syslog
281 #if defined(__NR_futex)
282 # define __NR_sys_futex __NR_futex
283 #endif
284 #if defined(__NR_futex_time64)
285 # define __NR_sys_futex_time64 __NR_futex_time64
286 #endif
287 #define __NR_sys_statx __NR_statx
288 
289 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
290 #define __NR__llseek __NR_lseek
291 #endif
292 
293 /* Newer kernel ports have llseek() instead of _llseek() */
294 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
295 #define TARGET_NR__llseek TARGET_NR_llseek
296 #endif
297 
298 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
299 #ifndef TARGET_O_NONBLOCK_MASK
300 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
301 #endif
302 
303 #define __NR_sys_gettid __NR_gettid
304 _syscall0(int, sys_gettid)
305 
306 /* For the 64-bit guest on 32-bit host case we must emulate
307  * getdents using getdents64, because otherwise the host
308  * might hand us back more dirent records than we can fit
309  * into the guest buffer after structure format conversion.
310  * Otherwise we emulate getdents with getdents if the host has it.
311  */
312 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
313 #define EMULATE_GETDENTS_WITH_GETDENTS
314 #endif
315 
316 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
317 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
318 #endif
319 #if (defined(TARGET_NR_getdents) && \
320       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
321     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
322 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
323 #endif
324 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
325 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
326           loff_t *, res, unsigned int, wh);
327 #endif
328 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
329 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
330           siginfo_t *, uinfo)
331 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
332 #ifdef __NR_exit_group
333 _syscall1(int,exit_group,int,error_code)
334 #endif
335 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
336 #define __NR_sys_close_range __NR_close_range
337 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
338 #ifndef CLOSE_RANGE_CLOEXEC
339 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
340 #endif
341 #endif
342 #if defined(__NR_futex)
343 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
344           const struct timespec *,timeout,int *,uaddr2,int,val3)
345 #endif
346 #if defined(__NR_futex_time64)
347 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
348           const struct timespec *,timeout,int *,uaddr2,int,val3)
349 #endif
350 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
351 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
352 #endif
353 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
354 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
355                              unsigned int, flags);
356 #endif
357 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
358 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
359 #endif
360 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
361 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
362           unsigned long *, user_mask_ptr);
363 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
364 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
365           unsigned long *, user_mask_ptr);
366 /* sched_attr is not defined in glibc < 2.41 */
367 #ifndef SCHED_ATTR_SIZE_VER0
368 struct sched_attr {
369     uint32_t size;
370     uint32_t sched_policy;
371     uint64_t sched_flags;
372     int32_t sched_nice;
373     uint32_t sched_priority;
374     uint64_t sched_runtime;
375     uint64_t sched_deadline;
376     uint64_t sched_period;
377     uint32_t sched_util_min;
378     uint32_t sched_util_max;
379 };
380 #endif
381 #define __NR_sys_sched_getattr __NR_sched_getattr
382 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
383           unsigned int, size, unsigned int, flags);
384 #define __NR_sys_sched_setattr __NR_sched_setattr
385 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
386           unsigned int, flags);
387 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
388 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
389 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
390 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
391           const struct sched_param *, param);
392 #define __NR_sys_sched_getparam __NR_sched_getparam
393 _syscall2(int, sys_sched_getparam, pid_t, pid,
394           struct sched_param *, param);
395 #define __NR_sys_sched_setparam __NR_sched_setparam
396 _syscall2(int, sys_sched_setparam, pid_t, pid,
397           const struct sched_param *, param);
398 #define __NR_sys_getcpu __NR_getcpu
399 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
400 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
401           void *, arg);
402 _syscall2(int, capget, struct __user_cap_header_struct *, header,
403           struct __user_cap_data_struct *, data);
404 _syscall2(int, capset, struct __user_cap_header_struct *, header,
405           struct __user_cap_data_struct *, data);
406 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
407 _syscall2(int, ioprio_get, int, which, int, who)
408 #endif
409 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
410 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
411 #endif
412 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
413 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
414 #endif
415 
416 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
417 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
418           unsigned long, idx1, unsigned long, idx2)
419 #endif
420 
421 /*
422  * It is assumed that struct statx is architecture independent.
423  */
424 #if defined(TARGET_NR_statx) && defined(__NR_statx)
425 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
426           unsigned int, mask, struct target_statx *, statxbuf)
427 #endif
428 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
429 _syscall2(int, membarrier, int, cmd, int, flags)
430 #endif
431 
/*
 * Translation table between guest and host open(2)/fcntl(2) flag bits.
 * Each entry is { target_mask, target_bits, host_mask, host_bits };
 * optional flags are guarded so the table still builds on hosts that
 * lack them.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
};
466 
467 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
468 
469 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
470 #if defined(__NR_utimensat)
471 #define __NR_sys_utimensat __NR_utimensat
472 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
473           const struct timespec *,tsp,int,flags)
474 #else
/*
 * Fallback for hosts without the utimensat() syscall: always fail,
 * reporting the call as unimplemented via ENOSYS.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
481 #endif
482 #endif /* TARGET_NR_utimensat */
483 
484 #ifdef TARGET_NR_renameat2
485 #if defined(__NR_renameat2)
486 #define __NR_sys_renameat2 __NR_renameat2
487 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
488           const char *, new, unsigned int, flags)
489 #else
/*
 * Fallback for hosts without the renameat2() syscall: a plain rename
 * (flags == 0) is forwarded to renameat(); any flagged variant is
 * reported as unimplemented via ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
499 #endif
500 #endif /* TARGET_NR_renameat2 */
501 
502 #ifdef CONFIG_INOTIFY
503 #include <sys/inotify.h>
504 #else
505 /* Userspace can usually survive runtime without inotify */
506 #undef TARGET_NR_inotify_init
507 #undef TARGET_NR_inotify_init1
508 #undef TARGET_NR_inotify_add_watch
509 #undef TARGET_NR_inotify_rm_watch
510 #endif /* CONFIG_INOTIFY  */
511 
512 #if defined(TARGET_NR_prlimit64)
513 #ifndef __NR_prlimit64
514 # define __NR_prlimit64 -1
515 #endif
516 #define __NR_sys_prlimit64 __NR_prlimit64
517 /* The glibc rlimit structure may not be that used by the underlying syscall */
518 struct host_rlimit64 {
519     uint64_t rlim_cur;
520     uint64_t rlim_max;
521 };
522 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
523           const struct host_rlimit64 *, new_limit,
524           struct host_rlimit64 *, old_limit)
525 #endif
526 
527 
528 #if defined(TARGET_NR_timer_create)
529 /* Maximum of 32 active POSIX timers allowed at any one time. */
530 #define GUEST_TIMER_MAX 32
531 static timer_t g_posix_timers[GUEST_TIMER_MAX];
532 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
533 
534 static inline int next_free_host_timer(void)
535 {
536     int k;
537     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
538         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
539             return k;
540         }
541     }
542     return -1;
543 }
544 
/*
 * Release POSIX timer slot @id so next_free_host_timer() can hand it
 * out again; the release store pairs with the allocator's exchange.
 */
static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
549 #endif
550 
/*
 * Convert a host errno value to the guest's numbering.  The case list
 * is generated from errnos.c.inc; any errno not listed there is passed
 * through unchanged (assumed to share its value on host and target).
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
561 
/*
 * Convert a guest errno value to the host's numbering — the inverse of
 * host_to_target_errno().  Unlisted values are passed through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
572 
573 abi_long get_errno(abi_long ret)
574 {
575     if (ret == -1)
576         return -host_to_target_errno(errno);
577     else
578         return ret;
579 }
580 
581 const char *target_strerror(int err)
582 {
583     if (err == QEMU_ERESTARTSYS) {
584         return "To be restarted";
585     }
586     if (err == QEMU_ESIGRETURN) {
587         return "Successful exit from sigreturn";
588     }
589 
590     return strerror(target_to_host_errno(err));
591 }
592 
593 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
594 {
595     int i;
596     uint8_t b;
597     if (usize <= ksize) {
598         return 1;
599     }
600     for (i = ksize; i < usize; i++) {
601         if (get_user_u8(b, addr + i)) {
602             return -TARGET_EFAULT;
603         }
604         if (b != 0) {
605             return 0;
606         }
607     }
608     return 1;
609 }
610 
611 /*
612  * Copies a target struct to a host struct, in a way that guarantees
613  * backwards-compatibility for struct syscall arguments.
614  *
615  * Similar to kernels uaccess.h:copy_struct_from_user()
616  */
617 int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
618 {
619     size_t size = MIN(ksize, usize);
620     size_t rest = MAX(ksize, usize) - size;
621 
622     /* Deal with trailing bytes. */
623     if (usize < ksize) {
624         memset(dst + size, 0, rest);
625     } else if (usize > ksize) {
626         int ret = check_zeroed_user(src, ksize, usize);
627         if (ret <= 0) {
628             return ret ?: -TARGET_E2BIG;
629         }
630     }
631     /* Copy the interoperable parts of the struct. */
632     if (copy_from_user(dst, src, size)) {
633         return -TARGET_EFAULT;
634     }
635     return 0;
636 }
637 
638 #define safe_syscall0(type, name) \
639 static type safe_##name(void) \
640 { \
641     return safe_syscall(__NR_##name); \
642 }
643 
644 #define safe_syscall1(type, name, type1, arg1) \
645 static type safe_##name(type1 arg1) \
646 { \
647     return safe_syscall(__NR_##name, arg1); \
648 }
649 
650 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
651 static type safe_##name(type1 arg1, type2 arg2) \
652 { \
653     return safe_syscall(__NR_##name, arg1, arg2); \
654 }
655 
656 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
657 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
658 { \
659     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
660 }
661 
662 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
663     type4, arg4) \
664 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
665 { \
666     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
667 }
668 
669 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
670     type4, arg4, type5, arg5) \
671 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
672     type5 arg5) \
673 { \
674     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
675 }
676 
677 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
678     type4, arg4, type5, arg5, type6, arg6) \
679 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
680     type5 arg5, type6 arg6) \
681 { \
682     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
683 }
684 
685 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
686 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
687 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
688               int, flags, mode_t, mode)
689 
690 safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
691               const struct open_how_ver0 *, how, size_t, size)
692 
693 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
694 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
695               struct rusage *, rusage)
696 #endif
697 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
698               int, options, struct rusage *, rusage)
699 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
700 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
701               char **, argv, char **, envp, int, flags)
702 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
703     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
704 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
705               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
706 #endif
707 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
708 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
709               struct timespec *, tsp, const sigset_t *, sigmask,
710               size_t, sigsetsize)
711 #endif
712 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
713               int, maxevents, int, timeout, const sigset_t *, sigmask,
714               size_t, sigsetsize)
715 #if defined(__NR_futex)
716 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
717               const struct timespec *,timeout,int *,uaddr2,int,val3)
718 #endif
719 #if defined(__NR_futex_time64)
720 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
721               const struct timespec *,timeout,int *,uaddr2,int,val3)
722 #endif
723 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
724 safe_syscall2(int, kill, pid_t, pid, int, sig)
725 safe_syscall2(int, tkill, int, tid, int, sig)
726 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
727 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
728 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
729 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
730               unsigned long, pos_l, unsigned long, pos_h)
731 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
732               unsigned long, pos_l, unsigned long, pos_h)
733 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
734               socklen_t, addrlen)
735 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
736               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
737 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
738               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
739 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
740 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
741 safe_syscall2(int, flock, int, fd, int, operation)
742 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
743 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
744               const struct timespec *, uts, size_t, sigsetsize)
745 #endif
746 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
747               int, flags)
748 #if defined(TARGET_NR_nanosleep)
749 safe_syscall2(int, nanosleep, const struct timespec *, req,
750               struct timespec *, rem)
751 #endif
752 #if defined(TARGET_NR_clock_nanosleep) || \
753     defined(TARGET_NR_clock_nanosleep_time64)
754 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
755               const struct timespec *, req, struct timespec *, rem)
756 #endif
757 #ifdef __NR_ipc
758 #ifdef __s390x__
759 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
760               void *, ptr)
761 #else
762 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
763               void *, ptr, long, fifth)
764 #endif
765 #endif
766 #ifdef __NR_msgsnd
767 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
768               int, flags)
769 #endif
770 #ifdef __NR_msgrcv
771 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
772               long, msgtype, int, flags)
773 #endif
774 #ifdef __NR_semtimedop
775 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
776               unsigned, nsops, const struct timespec *, timeout)
777 #endif
778 #if defined(TARGET_NR_mq_timedsend) || \
779     defined(TARGET_NR_mq_timedsend_time64)
780 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
781               size_t, len, unsigned, prio, const struct timespec *, timeout)
782 #endif
783 #if defined(TARGET_NR_mq_timedreceive) || \
784     defined(TARGET_NR_mq_timedreceive_time64)
785 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
786               size_t, len, unsigned *, prio, const struct timespec *, timeout)
787 #endif
788 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
789 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
790               int, outfd, loff_t *, poutoff, size_t, length,
791               unsigned int, flags)
792 #endif
793 #if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
794 safe_syscall4(int, fchmodat2, int, dfd, const char *, filename,
795               unsigned short, mode, unsigned int, flags)
796 #endif
797 
798 /* We do ioctl like this rather than via safe_syscall3 to preserve the
799  * "third argument might be integer or pointer or not present" behaviour of
800  * the libc function.
801  */
802 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
803 /* Similarly for fcntl. Since we always build with LFS enabled,
804  * we should be using the 64-bit structures automatically.
805  */
806 #ifdef __NR_fcntl64
807 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
808 #else
809 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
810 #endif
811 
812 static inline int host_to_target_sock_type(int host_type)
813 {
814     int target_type;
815 
816     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
817     case SOCK_DGRAM:
818         target_type = TARGET_SOCK_DGRAM;
819         break;
820     case SOCK_STREAM:
821         target_type = TARGET_SOCK_STREAM;
822         break;
823     default:
824         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
825         break;
826     }
827 
828 #if defined(SOCK_CLOEXEC)
829     if (host_type & SOCK_CLOEXEC) {
830         target_type |= TARGET_SOCK_CLOEXEC;
831     }
832 #endif
833 
834 #if defined(SOCK_NONBLOCK)
835     if (host_type & SOCK_NONBLOCK) {
836         target_type |= TARGET_SOCK_NONBLOCK;
837     }
838 #endif
839 
840     return target_type;
841 }
842 
843 static abi_ulong target_brk, initial_target_brk;
844 
/*
 * Record the guest's initial program break, rounded up to a guest
 * page.  initial_target_brk is kept as the floor below which do_brk()
 * refuses to shrink the heap.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
}
850 
/*
 * Implement the guest brk(2) syscall.
 * do_brk() must return target values and target errnos: on success the
 * new break; on failure the unchanged break — except on Alpha, whose
 * OSF/1 emulation expects a real errno instead.
 */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

    /*
     * Grow the heap.  MAP_FIXED_NOREPLACE makes the mapping fail rather
     * than clobber anything already mapped above the old break.
     */
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
900 
901 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
902     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
903 static inline abi_long copy_from_user_fdset(fd_set *fds,
904                                             abi_ulong target_fds_addr,
905                                             int n)
906 {
907     int i, nw, j, k;
908     abi_ulong b, *target_fds;
909 
910     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
911     if (!(target_fds = lock_user(VERIFY_READ,
912                                  target_fds_addr,
913                                  sizeof(abi_ulong) * nw,
914                                  1)))
915         return -TARGET_EFAULT;
916 
917     FD_ZERO(fds);
918     k = 0;
919     for (i = 0; i < nw; i++) {
920         /* grab the abi_ulong */
921         __get_user(b, &target_fds[i]);
922         for (j = 0; j < TARGET_ABI_BITS; j++) {
923             /* check the bit inside the abi_ulong */
924             if ((b >> j) & 1)
925                 FD_SET(k, fds);
926             k++;
927         }
928     }
929 
930     unlock_user(target_fds, target_fds_addr, 0);
931 
932     return 0;
933 }
934 
935 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
936                                                  abi_ulong target_fds_addr,
937                                                  int n)
938 {
939     if (target_fds_addr) {
940         if (copy_from_user_fdset(fds, target_fds_addr, n))
941             return -TARGET_EFAULT;
942         *fds_ptr = fds;
943     } else {
944         *fds_ptr = NULL;
945     }
946     return 0;
947 }
948 
949 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
950                                           const fd_set *fds,
951                                           int n)
952 {
953     int i, nw, j, k;
954     abi_long v;
955     abi_ulong *target_fds;
956 
957     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
958     if (!(target_fds = lock_user(VERIFY_WRITE,
959                                  target_fds_addr,
960                                  sizeof(abi_ulong) * nw,
961                                  0)))
962         return -TARGET_EFAULT;
963 
964     k = 0;
965     for (i = 0; i < nw; i++) {
966         v = 0;
967         for (j = 0; j < TARGET_ABI_BITS; j++) {
968             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
969             k++;
970         }
971         __put_user(v, &target_fds[i]);
972     }
973 
974     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
975 
976     return 0;
977 }
978 #endif
979 
/* Tick rate (HZ) assumed for the host kernel's clock_t values, used to
 * rescale them to the guest's TARGET_HZ below.  Alpha uses 1024. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
985 
/* Rescale a host clock_t tick count from HOST_HZ to the guest's
 * TARGET_HZ, widening to 64 bits for the intermediate multiply. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
994 
/* Copy a host struct rusage out to guest memory at @target_addr,
 * byte-swapping each field.  Returns 0 or -TARGET_EFAULT. */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1024 
#ifdef TARGET_NR_setrlimit
/* Convert a guest rlimit value (guest byte order) to the host rlim_t.
 * The guest's infinity constant, and any value that does not survive a
 * round-trip through rlim_t, maps to host RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim;

    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }
    host_rlim = swapped;
    /* Detect truncation when rlim_t is narrower than abi_ulong. */
    if (swapped != (rlim_t)host_rlim) {
        return RLIM_INFINITY;
    }
    return host_rlim;
}
#endif
1042 
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Convert a host rlim_t to the guest representation (guest byte order).
 * Host infinity, and any value not representable as a non-negative
 * abi_long, becomes TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong swapped;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        swapped = TARGET_RLIM_INFINITY;
    } else {
        swapped = rlim;
    }
    return tswapal(swapped);
}
#endif
1058 
/* Map a guest RLIMIT_* resource code to the host's code.  Codes we do
 * not recognise are passed through unchanged and left for the host
 * syscall to reject. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1100 
1101 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1102                                               abi_ulong target_tv_addr)
1103 {
1104     struct target_timeval *target_tv;
1105 
1106     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1107         return -TARGET_EFAULT;
1108     }
1109 
1110     __get_user(tv->tv_sec, &target_tv->tv_sec);
1111     __get_user(tv->tv_usec, &target_tv->tv_usec);
1112 
1113     unlock_user_struct(target_tv, target_tv_addr, 0);
1114 
1115     return 0;
1116 }
1117 
1118 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1119                                             const struct timeval *tv)
1120 {
1121     struct target_timeval *target_tv;
1122 
1123     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1124         return -TARGET_EFAULT;
1125     }
1126 
1127     __put_user(tv->tv_sec, &target_tv->tv_sec);
1128     __put_user(tv->tv_usec, &target_tv->tv_usec);
1129 
1130     unlock_user_struct(target_tv, target_tv_addr, 1);
1131 
1132     return 0;
1133 }
1134 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* Read a 64-bit __kernel_sock_timeval from guest memory into @tv.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *guest_tv;

    if (!lock_user_struct(VERIFY_READ, guest_tv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &guest_tv->tv_sec);
    __get_user(tv->tv_usec, &guest_tv->tv_usec);
    unlock_user_struct(guest_tv, target_tv_addr, 0);
    return 0;
}
#endif
1153 
1154 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1155                                               const struct timeval *tv)
1156 {
1157     struct target__kernel_sock_timeval *target_tv;
1158 
1159     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1160         return -TARGET_EFAULT;
1161     }
1162 
1163     __put_user(tv->tv_sec, &target_tv->tv_sec);
1164     __put_user(tv->tv_usec, &target_tv->tv_usec);
1165 
1166     unlock_user_struct(target_tv, target_tv_addr, 1);
1167 
1168     return 0;
1169 }
1170 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read an ABI-sized struct timespec from guest memory into @host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 *
 * Note: the guard previously listed TARGET_NR_pselect6 twice; the second
 * occurrence must be TARGET_NR_pselect6_time64, since do_pselect6() (built
 * for either syscall) references this function unconditionally.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1194 
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/* Read a 64-bit-time struct __kernel_timespec from guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1222 
1223 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1224                                                struct timespec *host_ts)
1225 {
1226     struct target_timespec *target_ts;
1227 
1228     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1229         return -TARGET_EFAULT;
1230     }
1231     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1232     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1233     unlock_user_struct(target_ts, target_addr, 1);
1234     return 0;
1235 }
1236 
1237 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1238                                                  struct timespec *host_ts)
1239 {
1240     struct target__kernel_timespec *target_ts;
1241 
1242     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1243         return -TARGET_EFAULT;
1244     }
1245     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1246     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1247     unlock_user_struct(target_ts, target_addr, 1);
1248     return 0;
1249 }
1250 
#if defined(TARGET_NR_gettimeofday)
/* Write a struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /* copy-in flag is 0: every field is overwritten below, so there is
     * no need to copy the guest's current contents in first (matches
     * the other copy_to_user_* helpers in this file). */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1269 
#if defined(TARGET_NR_settimeofday)
/* Read a struct timezone from guest memory into @tz.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *guest_tz;

    if (!lock_user_struct(VERIFY_READ, guest_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &guest_tz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &guest_tz->tz_dsttime);
    unlock_user_struct(guest_tz, target_tz_addr, 0);
    return 0;
}
#endif
1288 
1289 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1290 #include <mqueue.h>
1291 
1292 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1293                                               abi_ulong target_mq_attr_addr)
1294 {
1295     struct target_mq_attr *target_mq_attr;
1296 
1297     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1298                           target_mq_attr_addr, 1))
1299         return -TARGET_EFAULT;
1300 
1301     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1302     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1303     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1304     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1305 
1306     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1307 
1308     return 0;
1309 }
1310 
1311 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1312                                             const struct mq_attr *attr)
1313 {
1314     struct target_mq_attr *target_mq_attr;
1315 
1316     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1317                           target_mq_attr_addr, 0))
1318         return -TARGET_EFAULT;
1319 
1320     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1321     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1322     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1323     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1324 
1325     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1326 
1327     return 0;
1328 }
1329 #endif
1330 
1331 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* NULL guest addresses become NULL fd_set pointers. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select() takes a timeval but is implemented via pselect6, which
     * wants a timespec; convert here and back again after the call. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* On success, write the modified sets and remaining timeout
         * back to the guest. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1388 
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(2): the guest passes a single pointer to a struct
 * holding all five arguments; unpack them and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *sel;
    abi_ulong inp, outp, exp, tvp;
    long nsel;

    if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nsel = tswapal(sel->n);
    inp = tswapal(sel->inp);
    outp = tswapal(sel->outp);
    exp = tswapal(sel->exp);
    tvp = tswapal(sel->tvp);

    unlock_user_struct(sel, arg1, 0);

    return do_select(nsel, inp, outp, exp, tvp);
}
#endif
1411 #endif
1412 
1413 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
1414 static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
1415                             abi_long arg4, abi_long arg5, abi_long arg6,
1416                             bool time64)
1417 {
1418     abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
1419     fd_set rfds, wfds, efds;
1420     fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
1421     struct timespec ts, *ts_ptr;
1422     abi_long ret;
1423 
1424     /*
1425      * The 6th arg is actually two args smashed together,
1426      * so we cannot use the C library.
1427      */
1428     struct {
1429         sigset_t *set;
1430         size_t size;
1431     } sig, *sig_ptr;
1432 
1433     abi_ulong arg_sigset, arg_sigsize, *arg7;
1434 
1435     n = arg1;
1436     rfd_addr = arg2;
1437     wfd_addr = arg3;
1438     efd_addr = arg4;
1439     ts_addr = arg5;
1440 
1441     ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
1442     if (ret) {
1443         return ret;
1444     }
1445     ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
1446     if (ret) {
1447         return ret;
1448     }
1449     ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
1450     if (ret) {
1451         return ret;
1452     }
1453 
1454     /*
1455      * This takes a timespec, and not a timeval, so we cannot
1456      * use the do_select() helper ...
1457      */
1458     if (ts_addr) {
1459         if (time64) {
1460             if (target_to_host_timespec64(&ts, ts_addr)) {
1461                 return -TARGET_EFAULT;
1462             }
1463         } else {
1464             if (target_to_host_timespec(&ts, ts_addr)) {
1465                 return -TARGET_EFAULT;
1466             }
1467         }
1468             ts_ptr = &ts;
1469     } else {
1470         ts_ptr = NULL;
1471     }
1472 
1473     /* Extract the two packed args for the sigset */
1474     sig_ptr = NULL;
1475     if (arg6) {
1476         arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
1477         if (!arg7) {
1478             return -TARGET_EFAULT;
1479         }
1480         arg_sigset = tswapal(arg7[0]);
1481         arg_sigsize = tswapal(arg7[1]);
1482         unlock_user(arg7, arg6, 0);
1483 
1484         if (arg_sigset) {
1485             ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
1486             if (ret != 0) {
1487                 return ret;
1488             }
1489             sig_ptr = &sig;
1490             sig.size = SIGSET_T_SIZE;
1491         }
1492     }
1493 
1494     ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
1495                                   ts_ptr, sig_ptr));
1496 
1497     if (sig_ptr) {
1498         finish_sigsuspend_mask(ret);
1499     }
1500 
1501     if (!is_error(ret)) {
1502         if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
1503             return -TARGET_EFAULT;
1504         }
1505         if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
1506             return -TARGET_EFAULT;
1507         }
1508         if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
1509             return -TARGET_EFAULT;
1510         }
1511         if (time64) {
1512             if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
1513                 return -TARGET_EFAULT;
1514             }
1515         } else {
1516             if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
1517                 return -TARGET_EFAULT;
1518             }
1519         }
1520     }
1521     return ret;
1522 }
1523 #endif
1524 
1525 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1526     defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll(2)/ppoll(2) (and ppoll_time64).  @ppoll selects the
 * calling convention: for ppoll, arg3 is a timespec address and
 * arg4/arg5 are the sigmask address and size; for plain poll, arg3 is
 * a timeout in milliseconds (negative meaning infinite).
 * Returns target values and target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Bound nfds so the lock_user size below cannot overflow. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Atomically install the guest's sigmask for the call. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            /* ppoll updates the timeout with the time remaining.
             * NOTE(review): these error returns skip the target_pfd
             * unlock below — looks like a pre-existing lock leak;
             * confirm against lock_user() semantics. */
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
          struct timespec ts, *pts;

          if (arg3 >= 0) {
              /* Convert ms to secs, ns */
              ts.tv_sec = arg3 / 1000;
              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
              pts = &ts;
          } else {
              /* -ve poll() timeout means "infinite" */
              pts = NULL;
          }
          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy the returned events back out to the guest. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
1622 #endif
1623 
/* Implement pipe()/pipe2().  For the original pipe syscall some targets
 * return the second fd in a register instead of (or as well as) writing
 * to guest memory; pipe2 always writes both fds to @pipedes. */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default path: store both fds into the guest's int array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1657 
1658 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1659                                                abi_ulong target_addr,
1660                                                socklen_t len)
1661 {
1662     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1663     sa_family_t sa_family;
1664     struct target_sockaddr *target_saddr;
1665 
1666     if (fd_trans_target_to_host_addr(fd)) {
1667         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1668     }
1669 
1670     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1671     if (!target_saddr)
1672         return -TARGET_EFAULT;
1673 
1674     sa_family = tswap16(target_saddr->sa_family);
1675 
1676     /* Oops. The caller might send a incomplete sun_path; sun_path
1677      * must be terminated by \0 (see the manual page), but
1678      * unfortunately it is quite common to specify sockaddr_un
1679      * length as "strlen(x->sun_path)" while it should be
1680      * "strlen(...) + 1". We'll fix that here if needed.
1681      * Linux kernel has a similar feature.
1682      */
1683 
1684     if (sa_family == AF_UNIX) {
1685         if (len < unix_maxlen && len > 0) {
1686             char *cp = (char*)target_saddr;
1687 
1688             if ( cp[len-1] && !cp[len] )
1689                 len++;
1690         }
1691         if (len > unix_maxlen)
1692             len = unix_maxlen;
1693     }
1694 
1695     memcpy(addr, target_saddr, len);
1696     addr->sa_family = sa_family;
1697     if (sa_family == AF_NETLINK) {
1698         struct sockaddr_nl *nladdr;
1699 
1700         nladdr = (struct sockaddr_nl *)addr;
1701         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1702         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1703     } else if (sa_family == AF_PACKET) {
1704 	struct target_sockaddr_ll *lladdr;
1705 
1706 	lladdr = (struct target_sockaddr_ll *)addr;
1707 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1708 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1709     } else if (sa_family == AF_INET6) {
1710         struct sockaddr_in6 *in6addr;
1711 
1712         in6addr = (struct sockaddr_in6 *)addr;
1713         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1714     }
1715     unlock_user(target_saddr, target_addr, 0);
1716 
1717     return 0;
1718 }
1719 
1720 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1721                                                struct sockaddr *addr,
1722                                                socklen_t len)
1723 {
1724     struct target_sockaddr *target_saddr;
1725 
1726     if (len == 0) {
1727         return 0;
1728     }
1729     assert(addr);
1730 
1731     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1732     if (!target_saddr)
1733         return -TARGET_EFAULT;
1734     memcpy(target_saddr, addr, len);
1735     if (len >= offsetof(struct target_sockaddr, sa_family) +
1736         sizeof(target_saddr->sa_family)) {
1737         target_saddr->sa_family = tswap16(addr->sa_family);
1738     }
1739     if (addr->sa_family == AF_NETLINK &&
1740         len >= sizeof(struct target_sockaddr_nl)) {
1741         struct target_sockaddr_nl *target_nl =
1742                (struct target_sockaddr_nl *)target_saddr;
1743         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1744         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1745     } else if (addr->sa_family == AF_PACKET) {
1746         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1747         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1748         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1749     } else if (addr->sa_family == AF_INET6 &&
1750                len >= sizeof(struct target_sockaddr_in6)) {
1751         struct target_sockaddr_in6 *target_in6 =
1752                (struct target_sockaddr_in6 *)target_saddr;
1753         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1754     }
1755     unlock_user(target_saddr, target_addr, len);
1756 
1757     return 0;
1758 }
1759 
/*
 * Convert the ancillary-data (cmsg) area of a guest msghdr into the
 * host msghdr @msgh, walking the guest and host control buffers in
 * parallel and byte-swapping headers and known payload types.
 *
 * On return msgh->msg_controllen holds the number of host bytes
 * actually filled in. Returns 0, or -TARGET_EFAULT if the guest
 * control buffer cannot be locked for reading.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;        /* host bytes consumed so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Not even room for one guest cmsg header: nothing to convert. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        /* Reserve host space before converting; back out on overflow. */
        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET's numeric value differs per target; map it. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Payload is an array of file descriptors (ints). */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Payload is a struct ucred; swap each member. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload type: copy bytes through unswapped. */
            qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1853 
/*
 * Convert the ancillary-data (cmsg) area of the host msghdr @msgh back
 * into the guest msghdr @target_msgh, byte-swapping headers and the
 * payload types we understand. Truncation against the guest-supplied
 * buffer is reported to the guest via MSG_CTRUNC rather than treated
 * as an error, mirroring the kernel's put_cmsg() behaviour.
 *
 * On return target_msgh->msg_controllen holds the (swapped) number of
 * guest bytes written. Returns 0, or -TARGET_EFAULT if the guest
 * control buffer cannot be locked for writing.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;    /* guest bytes still available */
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;        /* guest bytes written so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Guest buffer can't hold even one header: report zero bytes. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET's numeric value differs per target; map it. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* Host timeval and target timeval may differ in size. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Truncate the payload to what fits in the guest buffer. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Array of file descriptors; copy what fits. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Truncated timestamps can't be converted field-wise. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error + offending peer address, as the
                 * kernel lays them out for MSG_ERRQUEUE reads.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                /* target_errh lies inside the locked guest buffer, so
                 * its host address doubles as the "guest" address here.
                 */
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            case IP_PKTINFO:
            {
                struct in_pktinfo *pkti = data;
                struct target_in_pktinfo *target_pi = target_data;

                __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
                /* IPv4 addresses stay in network byte order. */
                target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
                target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 analogue of the IP_RECVERR payload above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-filling any growth. */
            qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Record the (possibly truncated) length and advance both
         * cursors by the aligned guest footprint of this message.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2095 
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulate setsockopt(2): translate the guest's (level, optname) pair and
 * the option payload at guest address @optval_addr (@optlen bytes) into
 * host form, then invoke the host setsockopt(). Option levels whose
 * numeric values match the host (SOL_TCP, SOL_IP, ...) are passed
 * through; TARGET_SOL_SOCKET options are remapped individually because
 * both the level and the option numbers differ per target.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* The kernel accepts either an int or a single byte for
             * these; mirror that by reading whichever size the guest
             * gave us (an absent value defaults to 0).
             */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_MULTICAST_IF:
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
        {
            /* These take a struct whose accepted size varies: a bare
             * in_addr, an ip_mreq, or a full ip_mreqn. Build the host
             * ip_mreqn from whatever prefix the guest supplied.
             */
            struct ip_mreqn ip_mreq;
            struct target_ip_mreqn *target_smreqn;
            int min_size;

            QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
                              sizeof(struct target_ip_mreq));

            if (optname == IP_MULTICAST_IF) {
                min_size = sizeof(struct in_addr);
            } else {
                min_size = sizeof(struct target_ip_mreq);
            }
            if (optlen < min_size ||
                optlen > sizeof (struct target_ip_mreqn)) {
                return -TARGET_EINVAL;
            }

            target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!target_smreqn) {
                return -TARGET_EFAULT;
            }
            /* Addresses stay in network byte order; only the ifindex
             * (host byte order in the ABI) needs swapping.
             */
            ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
            if (optlen >= sizeof(struct target_ip_mreq)) {
                ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
                if (optlen >= sizeof(struct target_ip_mreqn)) {
                    __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
                    optlen = sizeof(struct ip_mreqn);
                }
            }
            unlock_user(target_smreqn, optval_addr, 0);
            ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
            break;
        }
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
        {
            /* struct ip_mreq_source contains only network-byte-order
             * addresses, so it can be passed through unswapped.
             */
            struct ip_mreq_source *ip_mreq_source;

            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* All of the above take a plain int value. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* ipi6_addr is network byte order; only swap the ifindex. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* Multicast address stays network order; swap the ifindex. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            /* The filter is an array of eight 32-bit words that must
             * be byte-swapped individually.
             */
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key material is opaque bytes; pass through unswapped. */
            char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!alg_key) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            unlock_user(alg_key, optval_addr, optlen);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* This option carries its value in optlen; no payload. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        {
                struct timeval tv;

                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                optname == TARGET_SO_RCVTIMEO ?
                                    SO_RCVTIMEO : SO_SNDTIMEO,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Convert a guest sock_fprog + filter array into host
                 * form; instruction immediates need byte-swapping.
                 */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		/* Interface name: bounded copy into a NUL-terminated
		 * local buffer before handing it to the host.
		 */
		char *dev_ifname, *addr_ifname;

		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	/* Shared tail for all the int-valued TARGET_SO_* options above:
	 * fetch the guest int and issue the host call with the remapped
	 * optname.
	 */
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported netlink options take a plain int. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2577 
2578 /* do_getsockopt() Must return target values and target errnos. */
2579 static abi_long do_getsockopt(int sockfd, int level, int optname,
2580                               abi_ulong optval_addr, abi_ulong optlen)
2581 {
2582     abi_long ret;
2583     int len, val;
2584     socklen_t lv;
2585 
2586     switch(level) {
2587     case TARGET_SOL_SOCKET:
2588         level = SOL_SOCKET;
2589         switch (optname) {
2590         /* These don't just return a single integer */
2591         case TARGET_SO_PEERNAME:
2592             goto unimplemented;
2593         case TARGET_SO_RCVTIMEO: {
2594             struct timeval tv;
2595             socklen_t tvlen;
2596 
2597             optname = SO_RCVTIMEO;
2598 
2599 get_timeout:
2600             if (get_user_u32(len, optlen)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             if (len < 0) {
2604                 return -TARGET_EINVAL;
2605             }
2606 
2607             tvlen = sizeof(tv);
2608             ret = get_errno(getsockopt(sockfd, level, optname,
2609                                        &tv, &tvlen));
2610             if (ret < 0) {
2611                 return ret;
2612             }
2613             if (len > sizeof(struct target_timeval)) {
2614                 len = sizeof(struct target_timeval);
2615             }
2616             if (copy_to_user_timeval(optval_addr, &tv)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (put_user_u32(len, optlen)) {
2620                 return -TARGET_EFAULT;
2621             }
2622             break;
2623         }
2624         case TARGET_SO_SNDTIMEO:
2625             optname = SO_SNDTIMEO;
2626             goto get_timeout;
2627         case TARGET_SO_PEERCRED: {
2628             struct ucred cr;
2629             socklen_t crlen;
2630             struct target_ucred *tcr;
2631 
2632             if (get_user_u32(len, optlen)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             if (len < 0) {
2636                 return -TARGET_EINVAL;
2637             }
2638 
2639             crlen = sizeof(cr);
2640             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2641                                        &cr, &crlen));
2642             if (ret < 0) {
2643                 return ret;
2644             }
2645             if (len > crlen) {
2646                 len = crlen;
2647             }
2648             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2649                 return -TARGET_EFAULT;
2650             }
2651             __put_user(cr.pid, &tcr->pid);
2652             __put_user(cr.uid, &tcr->uid);
2653             __put_user(cr.gid, &tcr->gid);
2654             unlock_user_struct(tcr, optval_addr, 1);
2655             if (put_user_u32(len, optlen)) {
2656                 return -TARGET_EFAULT;
2657             }
2658             break;
2659         }
2660         case TARGET_SO_PEERSEC: {
2661             char *name;
2662 
2663             if (get_user_u32(len, optlen)) {
2664                 return -TARGET_EFAULT;
2665             }
2666             if (len < 0) {
2667                 return -TARGET_EINVAL;
2668             }
2669             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2670             if (!name) {
2671                 return -TARGET_EFAULT;
2672             }
2673             lv = len;
2674             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2675                                        name, &lv));
2676             if (put_user_u32(lv, optlen)) {
2677                 ret = -TARGET_EFAULT;
2678             }
2679             unlock_user(name, optval_addr, lv);
2680             break;
2681         }
2682         case TARGET_SO_LINGER:
2683         {
2684             struct linger lg;
2685             socklen_t lglen;
2686             struct target_linger *tlg;
2687 
2688             if (get_user_u32(len, optlen)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             if (len < 0) {
2692                 return -TARGET_EINVAL;
2693             }
2694 
2695             lglen = sizeof(lg);
2696             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2697                                        &lg, &lglen));
2698             if (ret < 0) {
2699                 return ret;
2700             }
2701             if (len > lglen) {
2702                 len = lglen;
2703             }
2704             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2705                 return -TARGET_EFAULT;
2706             }
2707             __put_user(lg.l_onoff, &tlg->l_onoff);
2708             __put_user(lg.l_linger, &tlg->l_linger);
2709             unlock_user_struct(tlg, optval_addr, 1);
2710             if (put_user_u32(len, optlen)) {
2711                 return -TARGET_EFAULT;
2712             }
2713             break;
2714         }
2715         /* Options with 'int' argument.  */
2716         case TARGET_SO_DEBUG:
2717             optname = SO_DEBUG;
2718             goto int_case;
2719         case TARGET_SO_REUSEADDR:
2720             optname = SO_REUSEADDR;
2721             goto int_case;
2722 #ifdef SO_REUSEPORT
2723         case TARGET_SO_REUSEPORT:
2724             optname = SO_REUSEPORT;
2725             goto int_case;
2726 #endif
2727         case TARGET_SO_TYPE:
2728             optname = SO_TYPE;
2729             goto int_case;
2730         case TARGET_SO_ERROR:
2731             optname = SO_ERROR;
2732             goto int_case;
2733         case TARGET_SO_DONTROUTE:
2734             optname = SO_DONTROUTE;
2735             goto int_case;
2736         case TARGET_SO_BROADCAST:
2737             optname = SO_BROADCAST;
2738             goto int_case;
2739         case TARGET_SO_SNDBUF:
2740             optname = SO_SNDBUF;
2741             goto int_case;
2742         case TARGET_SO_RCVBUF:
2743             optname = SO_RCVBUF;
2744             goto int_case;
2745         case TARGET_SO_KEEPALIVE:
2746             optname = SO_KEEPALIVE;
2747             goto int_case;
2748         case TARGET_SO_OOBINLINE:
2749             optname = SO_OOBINLINE;
2750             goto int_case;
2751         case TARGET_SO_NO_CHECK:
2752             optname = SO_NO_CHECK;
2753             goto int_case;
2754         case TARGET_SO_PRIORITY:
2755             optname = SO_PRIORITY;
2756             goto int_case;
2757 #ifdef SO_BSDCOMPAT
2758         case TARGET_SO_BSDCOMPAT:
2759             optname = SO_BSDCOMPAT;
2760             goto int_case;
2761 #endif
2762         case TARGET_SO_PASSCRED:
2763             optname = SO_PASSCRED;
2764             goto int_case;
2765         case TARGET_SO_TIMESTAMP:
2766             optname = SO_TIMESTAMP;
2767             goto int_case;
2768         case TARGET_SO_RCVLOWAT:
2769             optname = SO_RCVLOWAT;
2770             goto int_case;
2771         case TARGET_SO_ACCEPTCONN:
2772             optname = SO_ACCEPTCONN;
2773             goto int_case;
2774         case TARGET_SO_PROTOCOL:
2775             optname = SO_PROTOCOL;
2776             goto int_case;
2777         case TARGET_SO_DOMAIN:
2778             optname = SO_DOMAIN;
2779             goto int_case;
2780         default:
2781             goto int_case;
2782         }
2783         break;
2784     case SOL_TCP:
2785     case SOL_UDP:
2786         /* TCP and UDP options all take an 'int' value.  */
2787     int_case:
2788         if (get_user_u32(len, optlen))
2789             return -TARGET_EFAULT;
2790         if (len < 0)
2791             return -TARGET_EINVAL;
2792         lv = sizeof(lv);
2793         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2794         if (ret < 0)
2795             return ret;
2796         switch (optname) {
2797         case SO_TYPE:
2798             val = host_to_target_sock_type(val);
2799             break;
2800         case SO_ERROR:
2801             val = host_to_target_errno(val);
2802             break;
2803         }
2804         if (len > lv)
2805             len = lv;
2806         if (len == 4) {
2807             if (put_user_u32(val, optval_addr))
2808                 return -TARGET_EFAULT;
2809         } else {
2810             if (put_user_u8(val, optval_addr))
2811                 return -TARGET_EFAULT;
2812         }
2813         if (put_user_u32(len, optlen))
2814             return -TARGET_EFAULT;
2815         break;
2816     case SOL_IP:
2817         switch(optname) {
2818         case IP_TOS:
2819         case IP_TTL:
2820         case IP_HDRINCL:
2821         case IP_ROUTER_ALERT:
2822         case IP_RECVOPTS:
2823         case IP_RETOPTS:
2824         case IP_PKTINFO:
2825         case IP_MTU_DISCOVER:
2826         case IP_RECVERR:
2827         case IP_RECVTOS:
2828 #ifdef IP_FREEBIND
2829         case IP_FREEBIND:
2830 #endif
2831         case IP_MULTICAST_TTL:
2832         case IP_MULTICAST_LOOP:
2833             if (get_user_u32(len, optlen))
2834                 return -TARGET_EFAULT;
2835             if (len < 0)
2836                 return -TARGET_EINVAL;
2837             lv = sizeof(lv);
2838             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2839             if (ret < 0)
2840                 return ret;
2841             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2842                 len = 1;
2843                 if (put_user_u32(len, optlen)
2844                     || put_user_u8(val, optval_addr))
2845                     return -TARGET_EFAULT;
2846             } else {
2847                 if (len > sizeof(int))
2848                     len = sizeof(int);
2849                 if (put_user_u32(len, optlen)
2850                     || put_user_u32(val, optval_addr))
2851                     return -TARGET_EFAULT;
2852             }
2853             break;
2854         default:
2855             ret = -TARGET_ENOPROTOOPT;
2856             break;
2857         }
2858         break;
2859     case SOL_IPV6:
2860         switch (optname) {
2861         case IPV6_MTU_DISCOVER:
2862         case IPV6_MTU:
2863         case IPV6_V6ONLY:
2864         case IPV6_RECVPKTINFO:
2865         case IPV6_UNICAST_HOPS:
2866         case IPV6_MULTICAST_HOPS:
2867         case IPV6_MULTICAST_LOOP:
2868         case IPV6_RECVERR:
2869         case IPV6_RECVHOPLIMIT:
2870         case IPV6_2292HOPLIMIT:
2871         case IPV6_CHECKSUM:
2872         case IPV6_ADDRFORM:
2873         case IPV6_2292PKTINFO:
2874         case IPV6_RECVTCLASS:
2875         case IPV6_RECVRTHDR:
2876         case IPV6_2292RTHDR:
2877         case IPV6_RECVHOPOPTS:
2878         case IPV6_2292HOPOPTS:
2879         case IPV6_RECVDSTOPTS:
2880         case IPV6_2292DSTOPTS:
2881         case IPV6_TCLASS:
2882         case IPV6_ADDR_PREFERENCES:
2883 #ifdef IPV6_RECVPATHMTU
2884         case IPV6_RECVPATHMTU:
2885 #endif
2886 #ifdef IPV6_TRANSPARENT
2887         case IPV6_TRANSPARENT:
2888 #endif
2889 #ifdef IPV6_FREEBIND
2890         case IPV6_FREEBIND:
2891 #endif
2892 #ifdef IPV6_RECVORIGDSTADDR
2893         case IPV6_RECVORIGDSTADDR:
2894 #endif
2895             if (get_user_u32(len, optlen))
2896                 return -TARGET_EFAULT;
2897             if (len < 0)
2898                 return -TARGET_EINVAL;
2899             lv = sizeof(lv);
2900             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2901             if (ret < 0)
2902                 return ret;
2903             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2904                 len = 1;
2905                 if (put_user_u32(len, optlen)
2906                     || put_user_u8(val, optval_addr))
2907                     return -TARGET_EFAULT;
2908             } else {
2909                 if (len > sizeof(int))
2910                     len = sizeof(int);
2911                 if (put_user_u32(len, optlen)
2912                     || put_user_u32(val, optval_addr))
2913                     return -TARGET_EFAULT;
2914             }
2915             break;
2916         default:
2917             ret = -TARGET_ENOPROTOOPT;
2918             break;
2919         }
2920         break;
2921 #ifdef SOL_NETLINK
2922     case SOL_NETLINK:
2923         switch (optname) {
2924         case NETLINK_PKTINFO:
2925         case NETLINK_BROADCAST_ERROR:
2926         case NETLINK_NO_ENOBUFS:
2927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2928         case NETLINK_LISTEN_ALL_NSID:
2929         case NETLINK_CAP_ACK:
2930 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2931 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2932         case NETLINK_EXT_ACK:
2933 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2934 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2935         case NETLINK_GET_STRICT_CHK:
2936 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2937             if (get_user_u32(len, optlen)) {
2938                 return -TARGET_EFAULT;
2939             }
2940             if (len != sizeof(val)) {
2941                 return -TARGET_EINVAL;
2942             }
2943             lv = len;
2944             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2945             if (ret < 0) {
2946                 return ret;
2947             }
2948             if (put_user_u32(lv, optlen)
2949                 || put_user_u32(val, optval_addr)) {
2950                 return -TARGET_EFAULT;
2951             }
2952             break;
2953 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2954         case NETLINK_LIST_MEMBERSHIPS:
2955         {
2956             uint32_t *results;
2957             int i;
2958             if (get_user_u32(len, optlen)) {
2959                 return -TARGET_EFAULT;
2960             }
2961             if (len < 0) {
2962                 return -TARGET_EINVAL;
2963             }
2964             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2965             if (!results && len > 0) {
2966                 return -TARGET_EFAULT;
2967             }
2968             lv = len;
2969             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2970             if (ret < 0) {
2971                 unlock_user(results, optval_addr, 0);
2972                 return ret;
2973             }
2974             /* swap host endianness to target endianness. */
2975             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2976                 results[i] = tswap32(results[i]);
2977             }
2978             if (put_user_u32(lv, optlen)) {
2979                 return -TARGET_EFAULT;
2980             }
2981             unlock_user(results, optval_addr, 0);
2982             break;
2983         }
2984 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2985         default:
2986             goto unimplemented;
2987         }
2988         break;
2989 #endif /* SOL_NETLINK */
2990     default:
2991     unimplemented:
2992         qemu_log_mask(LOG_UNIMP,
2993                       "getsockopt level=%d optname=%d not yet supported\n",
2994                       level, optname);
2995         ret = -TARGET_EOPNOTSUPP;
2996         break;
2997     }
2998     return ret;
2999 }
3000 
3001 /* Convert target low/high pair representing file offset into the host
3002  * low/high pair. This function doesn't handle offsets bigger than 64 bits
3003  * as the kernel doesn't handle them either.
3004  */
3005 static void target_to_host_low_high(abi_ulong tlow,
3006                                     abi_ulong thigh,
3007                                     unsigned long *hlow,
3008                                     unsigned long *hhigh)
3009 {
3010     uint64_t off = tlow |
3011         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
3012         TARGET_LONG_BITS / 2;
3013 
3014     *hlow = off;
3015     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3016 }
3017 
/*
 * Translate the target iovec array at @target_addr (of @count entries)
 * into a host iovec array, locking each buffer into host memory.
 * @type is VERIFY_READ or VERIFY_WRITE; @copy is forwarded to lock_user
 * for each buffer (set when target data must be copied in, i.e. send
 * paths).  On failure returns NULL with errno set; a count of zero
 * returns NULL with errno == 0.  Release the result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            /* Once one address is bad, all later entries become empty. */
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked before entry i failed. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3105 
3106 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3107                          abi_ulong count, int copy)
3108 {
3109     struct target_iovec *target_vec;
3110     int i;
3111 
3112     target_vec = lock_user(VERIFY_READ, target_addr,
3113                            count * sizeof(struct target_iovec), 1);
3114     if (target_vec) {
3115         for (i = 0; i < count; i++) {
3116             abi_ulong base = tswapal(target_vec[i].iov_base);
3117             abi_long len = tswapal(target_vec[i].iov_len);
3118             if (len < 0) {
3119                 break;
3120             }
3121             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3122         }
3123         unlock_user(target_vec, target_addr, 0);
3124     }
3125 
3126     g_free(vec);
3127 }
3128 
3129 static inline int target_to_host_sock_type(int *type)
3130 {
3131     int host_type = 0;
3132     int target_type = *type;
3133 
3134     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3135     case TARGET_SOCK_DGRAM:
3136         host_type = SOCK_DGRAM;
3137         break;
3138     case TARGET_SOCK_STREAM:
3139         host_type = SOCK_STREAM;
3140         break;
3141     default:
3142         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3143         break;
3144     }
3145     if (target_type & TARGET_SOCK_CLOEXEC) {
3146 #if defined(SOCK_CLOEXEC)
3147         host_type |= SOCK_CLOEXEC;
3148 #else
3149         return -TARGET_EINVAL;
3150 #endif
3151     }
3152     if (target_type & TARGET_SOCK_NONBLOCK) {
3153 #if defined(SOCK_NONBLOCK)
3154         host_type |= SOCK_NONBLOCK;
3155 #elif !defined(O_NONBLOCK)
3156         return -TARGET_EINVAL;
3157 #endif
3158     }
3159     *type = host_type;
3160     return 0;
3161 }
3162 
/*
 * Try to emulate socket type flags after socket creation.
 * On hosts without SOCK_NONBLOCK, apply O_NONBLOCK via fcntl instead.
 * Returns @fd on success; on failure closes @fd and returns
 * -TARGET_EINVAL.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Fix: F_GETFL can fail (-1); don't OR an error into F_SETFL. */
        if (flags == -1 || fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3177 
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    /* Translate the target type word (base type + flag bits) to host. */
    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only these netlink protocols have translators; refuse the rest. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /*
     * Packet sockets carry a 16-bit protocol value; tswap16 converts it
     * from the target's byte order to the host's.
     */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Emulate SOCK_NONBLOCK via fcntl on hosts lacking it. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Attach the data translator matching the netlink protocol. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: filtered by the whitelist check above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3231 
3232 /* do_bind() Must return target values and target errnos. */
3233 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3234                         socklen_t addrlen)
3235 {
3236     void *addr;
3237     abi_long ret;
3238 
3239     if ((int)addrlen < 0) {
3240         return -TARGET_EINVAL;
3241     }
3242 
3243     addr = alloca(addrlen+1);
3244 
3245     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3246     if (ret)
3247         return ret;
3248 
3249     return get_errno(bind(sockfd, addr, addrlen));
3250 }
3251 
3252 /* do_connect() Must return target values and target errnos. */
3253 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3254                            socklen_t addrlen)
3255 {
3256     void *addr;
3257     abi_long ret;
3258 
3259     if ((int)addrlen < 0) {
3260         return -TARGET_EINVAL;
3261     }
3262 
3263     addr = alloca(addrlen+1);
3264 
3265     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3266     if (ret)
3267         return ret;
3268 
3269     return get_errno(safe_connect(sockfd, addr, addrlen));
3270 }
3271 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: @msgp is the already-locked target
 * msghdr; @send selects sendmsg (non-zero) vs recvmsg (zero).
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): double the target size — presumably headroom for
     * host cmsg headers larger than the target's; confirm against
     * target_to_host_cmsg / host_to_target_cmsg. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload in a scratch copy, then send that. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            /* Translate control messages, then send in place. */
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            /* Translate received payload in place if the fd needs it. */
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Write updated lengths/flags back to the target header. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* Success: report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3384 
3385 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3386                                int flags, int send)
3387 {
3388     abi_long ret;
3389     struct target_msghdr *msgp;
3390 
3391     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3392                           msgp,
3393                           target_msg,
3394                           send ? 1 : 0)) {
3395         return -TARGET_EFAULT;
3396     }
3397     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3398     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3399     return ret;
3400 }
3401 
3402 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3403  * so it might not have this *mmsg-specific flag either.
3404  */
3405 #ifndef MSG_WAITFORONE
3406 #define MSG_WAITFORONE 0x10000
3407 #endif
3408 
3409 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3410                                 unsigned int vlen, unsigned int flags,
3411                                 int send)
3412 {
3413     struct target_mmsghdr *mmsgp;
3414     abi_long ret = 0;
3415     int i;
3416 
3417     if (vlen > UIO_MAXIOV) {
3418         vlen = UIO_MAXIOV;
3419     }
3420 
3421     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3422     if (!mmsgp) {
3423         return -TARGET_EFAULT;
3424     }
3425 
3426     for (i = 0; i < vlen; i++) {
3427         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3428         if (is_error(ret)) {
3429             break;
3430         }
3431         mmsgp[i].msg_len = tswap32(ret);
3432         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3433         if (flags & MSG_WAITFORONE) {
3434             flags |= MSG_DONTWAIT;
3435         }
3436     }
3437 
3438     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3439 
3440     /* Return number of datagrams sent if we sent any at all;
3441      * otherwise return the error.
3442      */
3443     if (i) {
3444         return i;
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_accept4() Must return target values and target errnos. */
3450 static abi_long do_accept4(int fd, abi_ulong target_addr,
3451                            abi_ulong target_addrlen_addr, int flags)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456     int host_flags;
3457 
3458     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3459         return -TARGET_EINVAL;
3460     }
3461 
3462     host_flags = 0;
3463     if (flags & TARGET_SOCK_NONBLOCK) {
3464         host_flags |= SOCK_NONBLOCK;
3465     }
3466     if (flags & TARGET_SOCK_CLOEXEC) {
3467         host_flags |= SOCK_CLOEXEC;
3468     }
3469 
3470     if (target_addr == 0) {
3471         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3472     }
3473 
3474     /* linux returns EFAULT if addrlen pointer is invalid */
3475     if (get_user_u32(addrlen, target_addrlen_addr))
3476         return -TARGET_EFAULT;
3477 
3478     if ((int)addrlen < 0) {
3479         return -TARGET_EINVAL;
3480     }
3481 
3482     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3483         return -TARGET_EFAULT;
3484     }
3485 
3486     addr = alloca(addrlen);
3487 
3488     ret_addrlen = addrlen;
3489     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3490     if (!is_error(ret)) {
3491         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3492         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3493             ret = -TARGET_EFAULT;
3494         }
3495     }
3496     return ret;
3497 }
3498 
3499 /* do_getpeername() Must return target values and target errnos. */
3500 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3501                                abi_ulong target_addrlen_addr)
3502 {
3503     socklen_t addrlen, ret_addrlen;
3504     void *addr;
3505     abi_long ret;
3506 
3507     if (get_user_u32(addrlen, target_addrlen_addr))
3508         return -TARGET_EFAULT;
3509 
3510     if ((int)addrlen < 0) {
3511         return -TARGET_EINVAL;
3512     }
3513 
3514     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3515         return -TARGET_EFAULT;
3516     }
3517 
3518     addr = alloca(addrlen);
3519 
3520     ret_addrlen = addrlen;
3521     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3522     if (!is_error(ret)) {
3523         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3524         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3525             ret = -TARGET_EFAULT;
3526         }
3527     }
3528     return ret;
3529 }
3530 
3531 /* do_getsockname() Must return target values and target errnos. */
3532 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3533                                abi_ulong target_addrlen_addr)
3534 {
3535     socklen_t addrlen, ret_addrlen;
3536     void *addr;
3537     abi_long ret;
3538 
3539     if (get_user_u32(addrlen, target_addrlen_addr))
3540         return -TARGET_EFAULT;
3541 
3542     if ((int)addrlen < 0) {
3543         return -TARGET_EINVAL;
3544     }
3545 
3546     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3547         return -TARGET_EFAULT;
3548     }
3549 
3550     addr = alloca(addrlen);
3551 
3552     ret_addrlen = addrlen;
3553     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3554     if (!is_error(ret)) {
3555         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3556         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3557             ret = -TARGET_EFAULT;
3558         }
3559     }
3560     return ret;
3561 }
3562 
3563 /* do_socketpair() Must return target values and target errnos. */
3564 static abi_long do_socketpair(int domain, int type, int protocol,
3565                               abi_ulong target_tab_addr)
3566 {
3567     int tab[2];
3568     abi_long ret;
3569 
3570     target_to_host_sock_type(&type);
3571 
3572     ret = get_errno(socketpair(domain, type, protocol, tab));
3573     if (!is_error(ret)) {
3574         if (put_user_s32(tab[0], target_tab_addr)
3575             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3576             ret = -TARGET_EFAULT;
3577     }
3578     return ret;
3579 }
3580 
/* do_sendto() Must return target values and target errnos.
 *
 * Sends 'len' bytes at guest address 'msg' on 'fd'.  If 'target_addr'
 * is non-zero it is converted to a host sockaddr and used as the
 * destination; otherwise a plain send is performed.  If the fd has a
 * registered data translator (fd_trans), the payload is copied and
 * translated before sending so the guest buffer is never modified.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL; /* non-NULL iff host_msg is a translated copy */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a private copy; keep the locked original in copy_msg
         * so it can be restored and unlocked on exit. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte: AF_UNIX path conversion may need to NUL-terminate. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and unlock the original buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3624 
/* do_recvfrom() Must return target values and target errnos.
 *
 * Receives up to 'len' bytes into guest buffer 'msg'.  When
 * 'target_addr' is non-zero, the sender's address is copied back there
 * and the updated length is stored at 'target_addrlen'.  Data received
 * on fds with a registered translator is converted before being made
 * visible to the guest.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        /* A NULL guest buffer is passed through (e.g. MSG_TRUNC probing). */
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Only the bytes actually received are translated. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy out the sender address, capped at the guest's buffer. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: unlock with copy-back of the received bytes. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: unlock without copying anything back to the guest. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3685 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * Demultiplexer for the legacy socketcall(2) syscall: 'num' selects the
 * socket operation and 'vptr' points at an array of abi_long arguments
 * in guest memory.  The argument count per operation comes from nargs[],
 * and each operation is forwarded to the corresponding do_*() helper.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3778 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
/* Guest-layout semid64_ds.  On 32-bit ABIs the time fields carry an
 * extra padding word each, mirroring the kernel's asm-generic layout.
 * Field order and sizes are ABI and must not be changed. */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;
  abi_ulong sem_otime;           /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;
#endif
  abi_ulong sem_ctime;           /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;           /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3797 
/* Convert a guest ipc_perm (embedded in a guest semid64_ds at
 * 'target_addr') into the host 'host_ip'.  The mode and __seq fields
 * are 32-bit on some targets and 16-bit on others, hence the #ifdefs.
 * Returns 0 on success, -TARGET_EFAULT if the guest struct is
 * inaccessible. */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* ipc_perm is the first member of every *id64_ds layout. */
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3825 
/* Inverse of target_to_host_ipc_perm(): write host 'host_ip' into the
 * guest ipc_perm embedded at 'target_addr'.  Returns 0 on success or
 * -TARGET_EFAULT if the guest struct cannot be mapped for writing. */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* ipc_perm is the first member of every *id64_ds layout. */
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3853 
3854 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3855                                                abi_ulong target_addr)
3856 {
3857     struct target_semid64_ds *target_sd;
3858 
3859     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3860         return -TARGET_EFAULT;
3861     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3862         return -TARGET_EFAULT;
3863     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3864     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3865     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3866     unlock_user_struct(target_sd, target_addr, 0);
3867     return 0;
3868 }
3869 
3870 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3871                                                struct semid_ds *host_sd)
3872 {
3873     struct target_semid64_ds *target_sd;
3874 
3875     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3876         return -TARGET_EFAULT;
3877     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3878         return -TARGET_EFAULT;
3879     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3880     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3881     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3882     unlock_user_struct(target_sd, target_addr, 1);
3883     return 0;
3884 }
3885 
/* Guest-layout seminfo, as returned by semctl(IPC_INFO/SEM_INFO).
 * All fields are plain ints, matching the kernel's struct seminfo. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3898 
3899 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3900                                               struct seminfo *host_seminfo)
3901 {
3902     struct target_seminfo *target_seminfo;
3903     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3904         return -TARGET_EFAULT;
3905     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3906     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3907     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3908     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3909     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3910     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3911     __put_user(host_seminfo->semume, &target_seminfo->semume);
3912     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3913     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3914     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3915     unlock_user_struct(target_seminfo, target_addr, 1);
3916     return 0;
3917 }
3918 
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
	int val;                /* value for SETVAL */
	struct semid_ds *buf;   /* buffer for IPC_STAT, IPC_SET */
	unsigned short *array;  /* array for GETALL, SETALL */
	struct seminfo *__buf;  /* buffer for IPC_INFO */
};
3925 
/* Guest-side view of the semctl() argument: the pointer members are
 * guest addresses (abi_ulong), not host pointers. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3932 
3933 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3934                                                abi_ulong target_addr)
3935 {
3936     int nsems;
3937     unsigned short *array;
3938     union semun semun;
3939     struct semid_ds semid_ds;
3940     int i, ret;
3941 
3942     semun.buf = &semid_ds;
3943 
3944     ret = semctl(semid, 0, IPC_STAT, semun);
3945     if (ret == -1)
3946         return get_errno(ret);
3947 
3948     nsems = semid_ds.sem_nsems;
3949 
3950     *host_array = g_try_new(unsigned short, nsems);
3951     if (!*host_array) {
3952         return -TARGET_ENOMEM;
3953     }
3954     array = lock_user(VERIFY_READ, target_addr,
3955                       nsems*sizeof(unsigned short), 1);
3956     if (!array) {
3957         g_free(*host_array);
3958         return -TARGET_EFAULT;
3959     }
3960 
3961     for(i=0; i<nsems; i++) {
3962         __get_user((*host_array)[i], &array[i]);
3963     }
3964     unlock_user(array, target_addr, 0);
3965 
3966     return 0;
3967 }
3968 
3969 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3970                                                unsigned short **host_array)
3971 {
3972     int nsems;
3973     unsigned short *array;
3974     union semun semun;
3975     struct semid_ds semid_ds;
3976     int i, ret;
3977 
3978     semun.buf = &semid_ds;
3979 
3980     ret = semctl(semid, 0, IPC_STAT, semun);
3981     if (ret == -1)
3982         return get_errno(ret);
3983 
3984     nsems = semid_ds.sem_nsems;
3985 
3986     array = lock_user(VERIFY_WRITE, target_addr,
3987                       nsems*sizeof(unsigned short), 0);
3988     if (!array)
3989         return -TARGET_EFAULT;
3990 
3991     for(i=0; i<nsems; i++) {
3992         __put_user((*host_array)[i], &array[i]);
3993     }
3994     g_free(*host_array);
3995     unlock_user(array, target_addr, 1);
3996 
3997     return 0;
3998 }
3999 
/* Emulate semctl(2): translate the guest's union semun argument for the
 * selected command, invoke the host semctl(), and convert results back.
 * The high bits of 'cmd' (e.g. IPC_64) are masked off because the host
 * call is always made with the host's native layout.  Returns a target
 * errno or the host result. */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* Round-trip the whole value array through a host copy. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            /* Round-trip the semid_ds structure through a host copy. */
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* These commands ignore the semun argument entirely. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
4069 
/* Guest-layout sembuf for semop/semtimedop; matches the kernel layout
 * (all three fields are 16-bit on every Linux ABI). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: negative, zero or positive */
    short sem_flg;           /* IPC_NOWAIT, SEM_UNDO */
};
4075 
4076 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4077                                              abi_ulong target_addr,
4078                                              unsigned nsops)
4079 {
4080     struct target_sembuf *target_sembuf;
4081     int i;
4082 
4083     target_sembuf = lock_user(VERIFY_READ, target_addr,
4084                               nsops*sizeof(struct target_sembuf), 1);
4085     if (!target_sembuf)
4086         return -TARGET_EFAULT;
4087 
4088     for(i=0; i<nsops; i++) {
4089         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4090         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4091         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4092     }
4093 
4094     unlock_user(target_sembuf, target_addr, 0);
4095 
4096     return 0;
4097 }
4098 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/* Emulate semop/semtimedop: 'ptr' is the guest sembuf array, 'timeout'
 * is a guest timespec address (0 means block indefinitely), and
 * 'time64' selects the 64-bit timespec layout.  Falls back to the
 * multiplexed ipc(2) syscall on hosts without semtimedop(2). */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* The kernel caps a single semop call at SEMOPM operations. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4161 
/* Guest-layout msqid64_ds.  On 32-bit ABIs each time field is followed
 * by a padding word, mirroring the kernel's asm-generic layout.  Field
 * order and sizes are ABI and must not be changed. */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;           /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;           /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;           /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;        /* current bytes on queue */
    abi_ulong msg_qnum;            /* messages currently on queue */
    abi_ulong msg_qbytes;          /* max bytes allowed on queue */
    abi_ulong msg_lspid;           /* pid of last msgsnd */
    abi_ulong msg_lrpid;           /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4185 
4186 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4187                                                abi_ulong target_addr)
4188 {
4189     struct target_msqid_ds *target_md;
4190 
4191     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4192         return -TARGET_EFAULT;
4193     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4194         return -TARGET_EFAULT;
4195     host_md->msg_stime = tswapal(target_md->msg_stime);
4196     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4197     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4198     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4199     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4200     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4201     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4202     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4203     unlock_user_struct(target_md, target_addr, 0);
4204     return 0;
4205 }
4206 
4207 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4208                                                struct msqid_ds *host_md)
4209 {
4210     struct target_msqid_ds *target_md;
4211 
4212     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4213         return -TARGET_EFAULT;
4214     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4215         return -TARGET_EFAULT;
4216     target_md->msg_stime = tswapal(host_md->msg_stime);
4217     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4218     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4219     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4220     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4221     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4222     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4223     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4224     unlock_user_struct(target_md, target_addr, 1);
4225     return 0;
4226 }
4227 
/* Guest-layout msginfo, as returned by msgctl(IPC_INFO/MSG_INFO);
 * matches the kernel's struct msginfo. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4238 
4239 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4240                                               struct msginfo *host_msginfo)
4241 {
4242     struct target_msginfo *target_msginfo;
4243     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4244         return -TARGET_EFAULT;
4245     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4246     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4247     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4248     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4249     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4250     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4251     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4252     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4253     unlock_user_struct(target_msginfo, target_addr, 1);
4254     return 0;
4255 }
4256 
4257 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4258 {
4259     struct msqid_ds dsarg;
4260     struct msginfo msginfo;
4261     abi_long ret = -TARGET_EINVAL;
4262 
4263     cmd &= 0xff;
4264 
4265     switch (cmd) {
4266     case IPC_STAT:
4267     case IPC_SET:
4268     case MSG_STAT:
4269         if (target_to_host_msqid_ds(&dsarg,ptr))
4270             return -TARGET_EFAULT;
4271         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4272         if (host_to_target_msqid_ds(ptr,&dsarg))
4273             return -TARGET_EFAULT;
4274         break;
4275     case IPC_RMID:
4276         ret = get_errno(msgctl(msgid, cmd, NULL));
4277         break;
4278     case IPC_INFO:
4279     case MSG_INFO:
4280         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4281         if (host_to_target_msginfo(ptr, &msginfo))
4282             return -TARGET_EFAULT;
4283         break;
4284     }
4285 
4286     return ret;
4287 }
4288 
/* Guest-layout msgbuf header: an abi_long message type followed by the
 * variable-length message text. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];   /* actually msgsz bytes long */
};
4293 
/* Emulate msgsnd(2): copy the guest message (mtype + msgsz bytes of
 * mtext) at 'msgp' into a host msgbuf and send it.  Falls back to the
 * multiplexed ipc(2) syscall on hosts without msgsnd(2).
 *
 * NOTE(review): lock_user_struct() here locks only sizeof(*target_mb)
 * bytes, yet the memcpy below reads msgsz bytes from target_mb->mtext;
 * this relies on lock_user returning a direct guest-memory pointer —
 * verify against the lock_user implementation. */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) covers the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        /* The s390 ipc(2) variant takes one fewer argument. */
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4334 
#ifdef __NR_ipc
/* Helper for the ipc(2) fallback in do_msgrcv(): packs the msgp/msgtyp
 * pair the way each host architecture's sys_ipc expects. */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic form: pointer-pair kludge plus a trailing dummy argument. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4348 
/*
 * Emulate msgrcv(2) for the guest: receive a SysV message into the guest
 * msgbuf at guest address msgp.  Returns the number of mtext bytes
 * received, or a negative target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    /* Reject negative sizes before they feed the allocation below. */
    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host bounce buffer: one long mtype followed by msgsz mtext bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Fall back to the multiplexed sys_ipc entry point on hosts that
     * lack a dedicated msgrcv syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    /* On success, ret is the number of mtext bytes received. */
    if (ret > 0) {
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        /* NOTE(review): the copy is done through target_mb->mtext while
         * the same guest range is locked as target_mtext; the two alias
         * under the linear mapping — confirm behavior under DEBUG_REMAP. */
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always valid here (the lock failure returns early),
     * but keep the defensive check byte-identical to the original flow. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4400 
4401 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4402                                                abi_ulong target_addr)
4403 {
4404     struct target_shmid_ds *target_sd;
4405 
4406     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4407         return -TARGET_EFAULT;
4408     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4409         return -TARGET_EFAULT;
4410     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4411     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4412     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4413     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4414     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4415     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4416     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4417     unlock_user_struct(target_sd, target_addr, 0);
4418     return 0;
4419 }
4420 
4421 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4422                                                struct shmid_ds *host_sd)
4423 {
4424     struct target_shmid_ds *target_sd;
4425 
4426     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4427         return -TARGET_EFAULT;
4428     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4429         return -TARGET_EFAULT;
4430     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4431     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4432     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4433     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4434     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4435     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4436     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4437     unlock_user_struct(target_sd, target_addr, 1);
4438     return 0;
4439 }
4440 
/* Target-ABI layout of the kernel's struct shminfo (returned by
 * shmctl(IPC_INFO)); all fields are target-sized unsigned longs. */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4448 
4449 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4450                                               struct shminfo *host_shminfo)
4451 {
4452     struct target_shminfo *target_shminfo;
4453     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4454         return -TARGET_EFAULT;
4455     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4456     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4457     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4458     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4459     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4460     unlock_user_struct(target_shminfo, target_addr, 1);
4461     return 0;
4462 }
4463 
/* Target-ABI layout of the kernel's struct shm_info (returned by
 * shmctl(SHM_INFO)). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4472 
4473 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4474                                                struct shm_info *host_shm_info)
4475 {
4476     struct target_shm_info *target_shm_info;
4477     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4478         return -TARGET_EFAULT;
4479     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4480     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4481     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4482     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4483     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4484     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4485     unlock_user_struct(target_shm_info, target_addr, 1);
4486     return 0;
4487 }
4488 
/*
 * Emulate shmctl(2) for the guest.  buf is a guest address whose meaning
 * depends on cmd.  Returns the host shmctl result or a target errno.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    /* Unhandled commands fall through the switch and report EINVAL. */
    abi_long ret = -TARGET_EINVAL;

    /* Mask off command modifier bits (e.g. IPC_64) so the switch below
     * matches the base command values. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* NOTE(review): buf is read even for the *_STAT commands, and the
         * write-back below runs even when shmctl failed — confirm this
         * matches the intended guest-visible behavior. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel writes a struct shminfo through the shmid_ds arg. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, SHM_INFO writes a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no data argument. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4527 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy sys_ipc syscall into the individual SysV IPC
 * operations.  The call number carries an ABI version in its top 16 bits
 * (mirroring the kernel's ipc/syscall.c); the low 16 bits select the
 * operation.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style (version 0) msgrcv: ptr points to a kludge
                 * struct bundling the real msgp and msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = target_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned indirectly through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = target_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4648 
/* kernel structure types definitions */

/* First pass over syscall_types.h (X-macro style): generate one
 * STRUCT_<name> enum value per structure described there. */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: emit a thunk type-description array per structure.
 * STRUCT_SPECIAL entries get an enum value only; their descriptions are
 * hand-written elsewhere. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed on-stack bounce buffer used for ioctl argument
 * conversion; larger payloads are heap-allocated case by case. */
#define MAX_STRUCT_SIZE 4096
4667 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handle the FS_IOC_FIEMAP ioctl: convert the variable-length
 * fiemap request from the guest, issue it, and convert the result back.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;    /* set when fm points at a heap allocation below */

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* fm_extent_count is guest-controlled: bound it so the outbufsz
     * computation below cannot overflow 32-bit arithmetic. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4756 
4757 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4758                                 int fd, int cmd, abi_long arg)
4759 {
4760     const argtype *arg_type = ie->arg_type;
4761     int target_size;
4762     void *argptr;
4763     int ret;
4764     struct ifconf *host_ifconf;
4765     uint32_t outbufsz;
4766     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4767     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4768     int target_ifreq_size;
4769     int nb_ifreq;
4770     int free_buf = 0;
4771     int i;
4772     int target_ifc_len;
4773     abi_long target_ifc_buf;
4774     int host_ifc_len;
4775     char *host_ifc_buf;
4776 
4777     assert(arg_type[0] == TYPE_PTR);
4778     assert(ie->access == IOC_RW);
4779 
4780     arg_type++;
4781     target_size = thunk_type_size(arg_type, 0);
4782 
4783     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4784     if (!argptr)
4785         return -TARGET_EFAULT;
4786     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4787     unlock_user(argptr, arg, 0);
4788 
4789     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4790     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4791     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4792 
4793     if (target_ifc_buf != 0) {
4794         target_ifc_len = host_ifconf->ifc_len;
4795         nb_ifreq = target_ifc_len / target_ifreq_size;
4796         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4797 
4798         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4799         if (outbufsz > MAX_STRUCT_SIZE) {
4800             /*
4801              * We can't fit all the extents into the fixed size buffer.
4802              * Allocate one that is large enough and use it instead.
4803              */
4804             host_ifconf = g_try_malloc(outbufsz);
4805             if (!host_ifconf) {
4806                 return -TARGET_ENOMEM;
4807             }
4808             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4809             free_buf = 1;
4810         }
4811         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4812 
4813         host_ifconf->ifc_len = host_ifc_len;
4814     } else {
4815       host_ifc_buf = NULL;
4816     }
4817     host_ifconf->ifc_buf = host_ifc_buf;
4818 
4819     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4820     if (!is_error(ret)) {
4821 	/* convert host ifc_len to target ifc_len */
4822 
4823         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4824         target_ifc_len = nb_ifreq * target_ifreq_size;
4825         host_ifconf->ifc_len = target_ifc_len;
4826 
4827 	/* restore target ifc_buf */
4828 
4829         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4830 
4831 	/* copy struct ifconf to target user */
4832 
4833         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4834         if (!argptr)
4835             return -TARGET_EFAULT;
4836         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4837         unlock_user(argptr, arg, target_size);
4838 
4839         if (target_ifc_buf != 0) {
4840             /* copy ifreq[] to target user */
4841             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4842             for (i = 0; i < nb_ifreq ; i++) {
4843                 thunk_convert(argptr + i * target_ifreq_size,
4844                               host_ifc_buf + i * sizeof(struct ifreq),
4845                               ifreq_arg_type, THUNK_TARGET);
4846             }
4847             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4848         }
4849     }
4850 
4851     if (free_buf) {
4852         g_free(host_ifconf);
4853     }
4854 
4855     return ret;
4856 }
4857 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Host-side shadow of a guest-submitted USB request block.
 * target_urb_adr must stay the first member: the struct itself is used
 * as a GHashTable key hashed with g_int64_hash, which reads the leading
 * 64 bits (see usbdevfs_urb_hashtable() and urb_hashtable_lookup()).
 */
struct live_urb {
    uint64_t target_urb_adr;      /* guest address of the guest's urb */
    uint64_t target_buf_adr;      /* guest address of the data buffer */
    char *target_buf_ptr;         /* host mapping while the buffer is locked */
    struct usbdevfs_urb host_urb; /* the urb actually handed to the kernel */
};
4868 
4869 static GHashTable *usbdevfs_urb_hashtable(void)
4870 {
4871     static GHashTable *urb_hashtable;
4872 
4873     if (!urb_hashtable) {
4874         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4875     }
4876     return urb_hashtable;
4877 }
4878 
/* Register an in-flight URB; the urb serves as both key and value. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4884 
4885 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4886 {
4887     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4888     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4889 }
4890 
/* Drop a URB from the in-flight table (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
4896 
/*
 * Handle USBDEVFS_REAPURB / REAPURBNDELAY: reap a completed urb from the
 * kernel, copy its results back to the guest urb, and write the guest urb
 * address (the guest's "handle") through *arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes back a host urb pointer through buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Recover the enclosing live_urb from the embedded host_urb pointer
     * the kernel returned (see do_ioctl_usbdevfs_submiturb). */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the data buffer locked at submit time, copying any IN data
     * back to the guest. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    /* The live_urb's lifetime ends once it has been reaped. */
    g_free(lurb);
    return ret;
}
4956 
4957 static abi_long
4958 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
4959                              uint8_t *buf_temp __attribute__((unused)),
4960                              int fd, int cmd, abi_long arg)
4961 {
4962     struct live_urb *lurb;
4963 
4964     /* map target address back to host URB with metadata. */
4965     lurb = urb_hashtable_lookup(arg);
4966     if (!lurb) {
4967         return -TARGET_EFAULT;
4968     }
4969     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
4970 }
4971 
/*
 * Handle USBDEVFS_SUBMITURB: build a host-side shadow of the guest urb,
 * lock its data buffer for the duration of the transfer, and register it
 * so REAPURB/DISCARDURB can find it again.  On success, ownership of the
 * live_urb (and the locked buffer) passes to the hashtable until reap.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* The guest urb address doubles as the reap handle and hash key. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: release the buffer lock (no copy-back)
         * and drop the shadow urb. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Kept alive until reaped; see do_ioctl_usbdevfs_reapurb. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5032 #endif /* CONFIG_USBFS */
5033 
5034 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5035                             int cmd, abi_long arg)
5036 {
5037     void *argptr;
5038     struct dm_ioctl *host_dm;
5039     abi_long guest_data;
5040     uint32_t guest_data_size;
5041     int target_size;
5042     const argtype *arg_type = ie->arg_type;
5043     abi_long ret;
5044     void *big_buf = NULL;
5045     char *host_data;
5046 
5047     arg_type++;
5048     target_size = thunk_type_size(arg_type, 0);
5049     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5050     if (!argptr) {
5051         ret = -TARGET_EFAULT;
5052         goto out;
5053     }
5054     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5055     unlock_user(argptr, arg, 0);
5056 
5057     /* buf_temp is too small, so fetch things into a bigger buffer */
5058     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5059     memcpy(big_buf, buf_temp, target_size);
5060     buf_temp = big_buf;
5061     host_dm = big_buf;
5062 
5063     guest_data = arg + host_dm->data_start;
5064     if ((guest_data - arg) < 0) {
5065         ret = -TARGET_EINVAL;
5066         goto out;
5067     }
5068     guest_data_size = host_dm->data_size - host_dm->data_start;
5069     host_data = (char*)host_dm + host_dm->data_start;
5070 
5071     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5072     if (!argptr) {
5073         ret = -TARGET_EFAULT;
5074         goto out;
5075     }
5076 
5077     switch (ie->host_cmd) {
5078     case DM_REMOVE_ALL:
5079     case DM_LIST_DEVICES:
5080     case DM_DEV_CREATE:
5081     case DM_DEV_REMOVE:
5082     case DM_DEV_SUSPEND:
5083     case DM_DEV_STATUS:
5084     case DM_DEV_WAIT:
5085     case DM_TABLE_STATUS:
5086     case DM_TABLE_CLEAR:
5087     case DM_TABLE_DEPS:
5088     case DM_LIST_VERSIONS:
5089         /* no input data */
5090         break;
5091     case DM_DEV_RENAME:
5092     case DM_DEV_SET_GEOMETRY:
5093         /* data contains only strings */
5094         memcpy(host_data, argptr, guest_data_size);
5095         break;
5096     case DM_TARGET_MSG:
5097         memcpy(host_data, argptr, guest_data_size);
5098         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5099         break;
5100     case DM_TABLE_LOAD:
5101     {
5102         void *gspec = argptr;
5103         void *cur_data = host_data;
5104         const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5105         int spec_size = thunk_type_size(dm_arg_type, 0);
5106         int i;
5107 
5108         for (i = 0; i < host_dm->target_count; i++) {
5109             struct dm_target_spec *spec = cur_data;
5110             uint32_t next;
5111             int slen;
5112 
5113             thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
5114             slen = strlen((char*)gspec + spec_size) + 1;
5115             next = spec->next;
5116             spec->next = sizeof(*spec) + slen;
5117             strcpy((char*)&spec[1], gspec + spec_size);
5118             gspec += next;
5119             cur_data += spec->next;
5120         }
5121         break;
5122     }
5123     default:
5124         ret = -TARGET_EINVAL;
5125         unlock_user(argptr, guest_data, 0);
5126         goto out;
5127     }
5128     unlock_user(argptr, guest_data, 0);
5129 
5130     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5131     if (!is_error(ret)) {
5132         guest_data = arg + host_dm->data_start;
5133         guest_data_size = host_dm->data_size - host_dm->data_start;
5134         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5135         switch (ie->host_cmd) {
5136         case DM_REMOVE_ALL:
5137         case DM_DEV_CREATE:
5138         case DM_DEV_REMOVE:
5139         case DM_DEV_RENAME:
5140         case DM_DEV_SUSPEND:
5141         case DM_DEV_STATUS:
5142         case DM_TABLE_LOAD:
5143         case DM_TABLE_CLEAR:
5144         case DM_TARGET_MSG:
5145         case DM_DEV_SET_GEOMETRY:
5146             /* no return data */
5147             break;
5148         case DM_LIST_DEVICES:
5149         {
5150             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5151             uint32_t remaining_data = guest_data_size;
5152             void *cur_data = argptr;
5153             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5154             int nl_size = 12; /* can't use thunk_size due to alignment */
5155 
5156             while (1) {
5157                 uint32_t next = nl->next;
5158                 if (next) {
5159                     nl->next = nl_size + (strlen(nl->name) + 1);
5160                 }
5161                 if (remaining_data < nl->next) {
5162                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5163                     break;
5164                 }
5165                 thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
5166                 strcpy(cur_data + nl_size, nl->name);
5167                 cur_data += nl->next;
5168                 remaining_data -= nl->next;
5169                 if (!next) {
5170                     break;
5171                 }
5172                 nl = (void*)nl + next;
5173             }
5174             break;
5175         }
5176         case DM_DEV_WAIT:
5177         case DM_TABLE_STATUS:
5178         {
5179             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5180             void *cur_data = argptr;
5181             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5182             int spec_size = thunk_type_size(dm_arg_type, 0);
5183             int i;
5184 
5185             for (i = 0; i < host_dm->target_count; i++) {
5186                 uint32_t next = spec->next;
5187                 int slen = strlen((char*)&spec[1]) + 1;
5188                 spec->next = (cur_data - argptr) + spec_size + slen;
5189                 if (guest_data_size < spec->next) {
5190                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5191                     break;
5192                 }
5193                 thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
5194                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5195                 cur_data = argptr + spec->next;
5196                 spec = (void*)host_dm + host_dm->data_start + next;
5197             }
5198             break;
5199         }
5200         case DM_TABLE_DEPS:
5201         {
5202             void *hdata = (void*)host_dm + host_dm->data_start;
5203             int count = *(uint32_t*)hdata;
5204             uint64_t *hdev = hdata + 8;
5205             uint64_t *gdev = argptr + 8;
5206             int i;
5207 
5208             *(uint32_t*)argptr = tswap32(count);
5209             for (i = 0; i < count; i++) {
5210                 *gdev = tswap64(*hdev);
5211                 gdev++;
5212                 hdev++;
5213             }
5214             break;
5215         }
5216         case DM_LIST_VERSIONS:
5217         {
5218             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5219             uint32_t remaining_data = guest_data_size;
5220             void *cur_data = argptr;
5221             const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5222             int vers_size = thunk_type_size(dm_arg_type, 0);
5223 
5224             while (1) {
5225                 uint32_t next = vers->next;
5226                 if (next) {
5227                     vers->next = vers_size + (strlen(vers->name) + 1);
5228                 }
5229                 if (remaining_data < vers->next) {
5230                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5231                     break;
5232                 }
5233                 thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
5234                 strcpy(cur_data + vers_size, vers->name);
5235                 cur_data += vers->next;
5236                 remaining_data -= vers->next;
5237                 if (!next) {
5238                     break;
5239                 }
5240                 vers = (void*)vers + next;
5241             }
5242             break;
5243         }
5244         default:
5245             unlock_user(argptr, guest_data, 0);
5246             ret = -TARGET_EINVAL;
5247             goto out;
5248         }
5249         unlock_user(argptr, guest_data, guest_data_size);
5250 
5251         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5252         if (!argptr) {
5253             ret = -TARGET_EFAULT;
5254             goto out;
5255         }
5256         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5257         unlock_user(argptr, arg, target_size);
5258     }
5259 out:
5260     g_free(big_buf);
5261     return ret;
5262 }
5263 
5264 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5265                                int cmd, abi_long arg)
5266 {
5267     void *argptr;
5268     int target_size;
5269     const argtype *arg_type = ie->arg_type;
5270     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5271     abi_long ret;
5272 
5273     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5274     struct blkpg_partition host_part;
5275 
5276     /* Read and convert blkpg */
5277     arg_type++;
5278     target_size = thunk_type_size(arg_type, 0);
5279     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5280     if (!argptr) {
5281         ret = -TARGET_EFAULT;
5282         goto out;
5283     }
5284     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5285     unlock_user(argptr, arg, 0);
5286 
5287     switch (host_blkpg->op) {
5288     case BLKPG_ADD_PARTITION:
5289     case BLKPG_DEL_PARTITION:
5290         /* payload is struct blkpg_partition */
5291         break;
5292     default:
5293         /* Unknown opcode */
5294         ret = -TARGET_EINVAL;
5295         goto out;
5296     }
5297 
5298     /* Read and convert blkpg->data */
5299     arg = (abi_long)(uintptr_t)host_blkpg->data;
5300     target_size = thunk_type_size(part_arg_type, 0);
5301     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5302     if (!argptr) {
5303         ret = -TARGET_EFAULT;
5304         goto out;
5305     }
5306     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5307     unlock_user(argptr, arg, 0);
5308 
5309     /* Swizzle the data pointer to our local copy and call! */
5310     host_blkpg->data = &host_part;
5311     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5312 
5313 out:
5314     return ret;
5315 }
5316 
/*
 * SIOCADDRT/SIOCDELRT: the struct rtentry argument contains an rt_dev
 * member that points at a device-name string in guest memory, which the
 * generic thunk machinery cannot translate.  The struct is therefore
 * converted field by field here, with rt_dev remapped to a host pointer
 * obtained from lock_user_string().
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* This handler is only registered for write-only rtentry ioctls. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* Special-cased field: lock the guest string in place. */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL rt_dev means "no device name". */
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        /* All other fields go through the ordinary thunk conversion. */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must have seen the rt_dev field exactly once. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* Release the locked device-name string (no write-back needed). */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5382 
5383 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5384                                      int fd, int cmd, abi_long arg)
5385 {
5386     int sig = target_to_host_signal(arg);
5387     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5388 }
5389 
5390 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5391                                     int fd, int cmd, abi_long arg)
5392 {
5393     struct timeval tv;
5394     abi_long ret;
5395 
5396     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5397     if (is_error(ret)) {
5398         return ret;
5399     }
5400 
5401     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5402         if (copy_to_user_timeval(arg, &tv)) {
5403             return -TARGET_EFAULT;
5404         }
5405     } else {
5406         if (copy_to_user_timeval64(arg, &tv)) {
5407             return -TARGET_EFAULT;
5408         }
5409     }
5410 
5411     return ret;
5412 }
5413 
5414 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5415                                       int fd, int cmd, abi_long arg)
5416 {
5417     struct timespec ts;
5418     abi_long ret;
5419 
5420     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5421     if (is_error(ret)) {
5422         return ret;
5423     }
5424 
5425     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5426         if (host_to_target_timespec(arg, &ts)) {
5427             return -TARGET_EFAULT;
5428         }
5429     } else{
5430         if (host_to_target_timespec64(arg, &ts)) {
5431             return -TARGET_EFAULT;
5432         }
5433     }
5434 
5435     return ret;
5436 }
5437 
5438 #ifdef TIOCGPTPEER
5439 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5440                                      int fd, int cmd, abi_long arg)
5441 {
5442     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5443     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5444 }
5445 #endif
5446 
5447 #ifdef HAVE_DRM_H
5448 
5449 static void unlock_drm_version(struct drm_version *host_ver,
5450                                struct target_drm_version *target_ver,
5451                                bool copy)
5452 {
5453     unlock_user(host_ver->name, target_ver->name,
5454                                 copy ? host_ver->name_len : 0);
5455     unlock_user(host_ver->date, target_ver->date,
5456                                 copy ? host_ver->date_len : 0);
5457     unlock_user(host_ver->desc, target_ver->desc,
5458                                 copy ? host_ver->desc_len : 0);
5459 }
5460 
/*
 * Prepare a host struct drm_version for DRM_IOCTL_VERSION: copy the
 * three buffer lengths from the guest struct and lock the guest
 * name/date/desc buffers so the kernel can write into them directly.
 * Returns 0 on success or -EFAULT on a bad guest buffer; on failure
 * any buffers already locked are released again.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        /*
         * NOTE(review): the buffer address and length are read straight
         * from the target struct without byte-swapping (unlike the
         * __get_user above) — verify this is correct for cross-endian
         * guests.
         */
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            /* Nothing locked yet, so no cleanup is needed here. */
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Undo any locks taken so far; nothing is copied back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5498 
/*
 * Copy the kernel-filled drm_version results back to the guest struct
 * and release the locked string buffers, writing their contents back
 * to guest memory (copy == true).
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5511 
5512 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5513                              int fd, int cmd, abi_long arg)
5514 {
5515     struct drm_version *ver;
5516     struct target_drm_version *target_ver;
5517     abi_long ret;
5518 
5519     switch (ie->host_cmd) {
5520     case DRM_IOCTL_VERSION:
5521         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5522             return -TARGET_EFAULT;
5523         }
5524         ver = (struct drm_version *)buf_temp;
5525         ret = target_to_host_drmversion(ver, target_ver);
5526         if (!is_error(ret)) {
5527             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5528             if (is_error(ret)) {
5529                 unlock_drm_version(ver, target_ver, false);
5530             } else {
5531                 host_to_target_drmversion(target_ver, ver);
5532             }
5533         }
5534         unlock_user_struct(target_ver, arg, 0);
5535         return ret;
5536     }
5537     return -TARGET_ENOSYS;
5538 }
5539 
/*
 * DRM_IOCTL_I915_GETPARAM: the struct contains a pointer to an int
 * result; point it at a host-local int for the kernel call and then
 * store the result back through the guest's pointer.
 */
static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
                                           struct drm_i915_getparam *gparam,
                                           int fd, abi_long arg)
{
    abi_long ret;
    int value;
    struct target_drm_i915_getparam *target_gparam;

    if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
        return -TARGET_EFAULT;
    }

    __get_user(gparam->param, &target_gparam->param);
    /* Swizzle the result pointer to a host-local int. */
    gparam->value = &value;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
    /*
     * NOTE(review): put_user_s32()'s return value is ignored and
     * target_gparam->value is used without byte-swapping — verify the
     * fault and cross-endian behavior against callers.
     */
    put_user_s32(value, target_gparam->value);

    unlock_user_struct(target_gparam, arg, 0);
    return ret;
}
5560 
5561 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5562                                   int fd, int cmd, abi_long arg)
5563 {
5564     switch (ie->host_cmd) {
5565     case DRM_IOCTL_I915_GETPARAM:
5566         return do_ioctl_drm_i915_getparam(ie,
5567                                           (struct drm_i915_getparam *)buf_temp,
5568                                           fd, arg);
5569     default:
5570         return -TARGET_ENOSYS;
5571     }
5572 }
5573 
5574 #endif
5575 
/*
 * TUNSETTXFILTER: the struct tun_filter header is followed by a
 * variable-length array of 'count' MAC addresses, so it is copied in
 * two stages: first the fixed header (to learn 'count'), then the
 * address array.  The combined size is bounds-checked against the
 * buf_temp capacity before the second copy.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    /*
     * Fixed part: flags and count.  The host struct layout is used for
     * the guest view as well — presumably identical on all targets
     * (two __u16 plus a byte array); only byte order needs fixing.
     */
    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* Reject counts whose address array would overflow buf_temp. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        /* Variable part: 'count' MAC addresses, copied verbatim. */
        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5611 
/*
 * Table of every ioctl known to the emulator, expanded from ioctls.h.
 * Each entry records the target command number, the matching host
 * command, a printable name, the access mode, an optional custom
 * handler, and the argument-type description used by the thunk code.
 */
IOCTLEntry ioctl_entries[] = {
/* Generic entry: argument translated by the argtype machinery in do_ioctl(). */
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
/* Entry with a hand-written conversion handler (dofn). */
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
/* Recognized command with host_cmd 0: do_ioctl() returns -TARGET_ENOTTY
 * for it without logging it as unsupported. */
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },  /* sentinel terminating the lookup loop in do_ioctl() */
};
5622 
5623 /* ??? Implement proper locking for ioctls.  */
5624 /* do_ioctl() Must return target values and target errnos. */
5625 static abi_long do_ioctl(int fd, int cmd, abi_long arg)
5626 {
5627     const IOCTLEntry *ie;
5628     const argtype *arg_type;
5629     abi_long ret;
5630     uint8_t buf_temp[MAX_STRUCT_SIZE];
5631     int target_size;
5632     void *argptr;
5633 
5634     ie = ioctl_entries;
5635     for(;;) {
5636         if (ie->target_cmd == 0) {
5637             qemu_log_mask(
5638                 LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
5639             return -TARGET_ENOTTY;
5640         }
5641         if (ie->target_cmd == cmd)
5642             break;
5643         ie++;
5644     }
5645     arg_type = ie->arg_type;
5646     if (ie->do_ioctl) {
5647         return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
5648     } else if (!ie->host_cmd) {
5649         /* Some architectures define BSD ioctls in their headers
5650            that are not implemented in Linux.  */
5651         return -TARGET_ENOTTY;
5652     }
5653 
5654     switch(arg_type[0]) {
5655     case TYPE_NULL:
5656         /* no argument */
5657         ret = get_errno(safe_ioctl(fd, ie->host_cmd));
5658         break;
5659     case TYPE_PTRVOID:
5660     case TYPE_INT:
5661     case TYPE_LONG:
5662     case TYPE_ULONG:
5663         ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
5664         break;
5665     case TYPE_PTR:
5666         arg_type++;
5667         target_size = thunk_type_size(arg_type, 0);
5668         switch(ie->access) {
5669         case IOC_R:
5670             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5671             if (!is_error(ret)) {
5672                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5673                 if (!argptr)
5674                     return -TARGET_EFAULT;
5675                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5676                 unlock_user(argptr, arg, target_size);
5677             }
5678             break;
5679         case IOC_W:
5680             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5681             if (!argptr)
5682                 return -TARGET_EFAULT;
5683             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5684             unlock_user(argptr, arg, 0);
5685             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5686             break;
5687         default:
5688         case IOC_RW:
5689             argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5690             if (!argptr)
5691                 return -TARGET_EFAULT;
5692             thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5693             unlock_user(argptr, arg, 0);
5694             ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5695             if (!is_error(ret)) {
5696                 argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5697                 if (!argptr)
5698                     return -TARGET_EFAULT;
5699                 thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5700                 unlock_user(argptr, arg, target_size);
5701             }
5702             break;
5703         }
5704         break;
5705     default:
5706         qemu_log_mask(LOG_UNIMP,
5707                       "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
5708                       (long)cmd, arg_type[0]);
5709         ret = -TARGET_ENOTTY;
5710         break;
5711     }
5712     return ret;
5713 }
5714 
5715 static const bitmask_transtbl iflag_tbl[] = {
5716         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5717         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5718         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5719         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5720         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5721         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5722         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5723         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5724         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5725         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5726         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5727         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5728         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5729         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5730         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5731 };
5732 
5733 static const bitmask_transtbl oflag_tbl[] = {
5734 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5735 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5736 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5737 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5738 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5739 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5740 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5741 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5742 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5743 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5744 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5745 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5746 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5747 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5748 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5749 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5750 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5751 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5752 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5753 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5754 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5755 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5756 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5757 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5758 };
5759 
5760 static const bitmask_transtbl cflag_tbl[] = {
5761 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5762 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5763 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5764 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5765 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5766 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5767 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5768 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5769 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5770 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5771 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5772 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5773 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5774 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5775 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5776 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5777 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5778 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5779 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5780 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5781 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5782 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5783 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5784 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5785 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5786 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5787 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5788 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5789 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5790 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5791 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5792 };
5793 
5794 static const bitmask_transtbl lflag_tbl[] = {
5795   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5796   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5797   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5798   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5799   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5800   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5801   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5802   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5803   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5804   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5805   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5806   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5807   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5808   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5809   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5810   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5811 };
5812 
/*
 * Convert a guest struct target_termios to a host struct host_termios:
 * the four mode words are byte-swapped and remapped through the bitmask
 * tables above, and each control character is copied to its host slot.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Control characters: zero first, then copy the known slots, since
     * host and target may index (and size) the c_cc array differently. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5847 
/*
 * Convert a host struct host_termios to a guest struct target_termios:
 * the inverse of target_to_host_termios() — mode words are remapped
 * through the bitmask tables and byte-swapped, control characters are
 * copied slot by slot.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Control characters: zero first, then copy the known slots, since
     * host and target may index (and size) the c_cc array differently. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5882 
/*
 * Thunk descriptor for struct termios: uses the hand-written
 * host_to_target_termios()/target_to_host_termios() converters above
 * instead of a field list, since the flag bits need table remapping.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5889 
5890 /* If the host does not provide these bits, they may be safely discarded. */
5891 #ifndef MAP_SYNC
5892 #define MAP_SYNC 0
5893 #endif
5894 #ifndef MAP_UNINITIALIZED
5895 #define MAP_UNINITIALIZED 0
5896 #endif
5897 
/*
 * Mapping of mmap() flag bits between target and host.  MAP_SHARED /
 * MAP_PRIVATE / MAP_SHARED_VALIDATE are handled separately in
 * do_mmap() and therefore do not appear here.
 */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
};
5923 
5924 /*
5925  * Arrange for legacy / undefined architecture specific flags to be
5926  * ignored by mmap handling code.
5927  */
5928 #ifndef TARGET_MAP_32BIT
5929 #define TARGET_MAP_32BIT 0
5930 #endif
5931 #ifndef TARGET_MAP_HUGE_2MB
5932 #define TARGET_MAP_HUGE_2MB 0
5933 #endif
5934 #ifndef TARGET_MAP_HUGE_1GB
5935 #define TARGET_MAP_HUGE_1GB 0
5936 #endif
5937 
/*
 * Guest-facing wrapper around target_mmap(): validates the mapping
 * type, translates the target flag bits to host flag bits, and
 * returns the mapped address or a target errno.
 */
static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
                        int target_flags, int fd, off_t offset)
{
    /*
     * The historical set of flags that all mmap types implicitly support.
     */
    enum {
        TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
                               | TARGET_MAP_PRIVATE
                               | TARGET_MAP_FIXED
                               | TARGET_MAP_ANONYMOUS
                               | TARGET_MAP_DENYWRITE
                               | TARGET_MAP_EXECUTABLE
                               | TARGET_MAP_UNINITIALIZED
                               | TARGET_MAP_GROWSDOWN
                               | TARGET_MAP_LOCKED
                               | TARGET_MAP_NORESERVE
                               | TARGET_MAP_POPULATE
                               | TARGET_MAP_NONBLOCK
                               | TARGET_MAP_STACK
                               | TARGET_MAP_HUGETLB
                               | TARGET_MAP_32BIT
                               | TARGET_MAP_HUGE_2MB
                               | TARGET_MAP_HUGE_1GB
    };
    int host_flags;

    /* The map type field is handled by hand; it is not in mmap_flags_tbl. */
    switch (target_flags & TARGET_MAP_TYPE) {
    case TARGET_MAP_PRIVATE:
        host_flags = MAP_PRIVATE;
        break;
    case TARGET_MAP_SHARED:
        host_flags = MAP_SHARED;
        break;
    case TARGET_MAP_SHARED_VALIDATE:
        /*
         * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
         * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
         */
        if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
            return -TARGET_EOPNOTSUPP;
        }
        host_flags = MAP_SHARED_VALIDATE;
        if (target_flags & TARGET_MAP_SYNC) {
            host_flags |= MAP_SYNC;
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    /* Translate the remaining flag bits via the table. */
    host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);

    return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
}
5992 
5993 /*
5994  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
5995  *       TARGET_I386 is defined if TARGET_X86_64 is defined
5996  */
5997 #if defined(TARGET_I386)
5998 
5999 /* NOTE: there is really one LDT for all the threads */
6000 static uint8_t *ldt_table;
6001 
6002 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6003 {
6004     int size;
6005     void *p;
6006 
6007     if (!ldt_table)
6008         return 0;
6009     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6010     if (size > bytecount)
6011         size = bytecount;
6012     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6013     if (!p)
6014         return -TARGET_EFAULT;
6015     /* ??? Should this by byteswapped?  */
6016     memcpy(p, ldt_table, size);
6017     unlock_user(p, ptr, size);
6018     return size;
6019 }
6020 
/* XXX: add locking support */
/*
 * Implement modify_ldt(func == 1 or 0x11): install or clear one LDT entry.
 *
 * The guest passes a struct user_desc (target_modify_ldt_ldt_s); we unpack
 * its flag bits and re-encode them into the two 32-bit halves of an x86
 * segment descriptor, mirroring the encoding logic of the Linux kernel's
 * write_ldt().  @oldmode selects the legacy modify_ldt semantics
 * (func == 1), which reject contents == 3 and ignore the "useable" bit.
 * The backing LDT is lazily allocated via target_mmap on first write.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same bit layout as the kernel). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 is rejected in oldmode and requires seg_not_present. */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Re-encode the fields into the two 32-bit descriptor halves. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6112 
6113 /* specific and weird i386 syscalls */
6114 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6115                               unsigned long bytecount)
6116 {
6117     abi_long ret;
6118 
6119     switch (func) {
6120     case 0:
6121         ret = read_ldt(ptr, bytecount);
6122         break;
6123     case 1:
6124         ret = write_ldt(env, ptr, bytecount, 1);
6125         break;
6126     case 0x11:
6127         ret = write_ldt(env, ptr, bytecount, 0);
6128         break;
6129     default:
6130         ret = -TARGET_ENOSYS;
6131         break;
6132     }
6133     return ret;
6134 }
6135 
6136 #if defined(TARGET_ABI32)
/*
 * set_thread_area: install a TLS descriptor into the guest's GDT.
 *
 * If the guest passes entry_number == -1 we search the TLS slot range
 * for a free (all-zero) descriptor, claim it, and write the chosen
 * index back through the guest struct.  The flag unpacking and the
 * descriptor re-encoding mirror write_ldt() above and the kernel's
 * set_thread_area().
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Byte-swap the guest descriptor into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 asks us to allocate a free TLS slot and report it back. */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Only the TLS slot range of the GDT may be written. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the user_desc flag bits (same bit layout as the kernel). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 is only accepted for a not-present segment. */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Re-encode the fields into the two 32-bit descriptor halves. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6221 
/*
 * get_thread_area: read a TLS descriptor back out of the guest's GDT
 * and decode it into the user_desc layout the guest expects.  This is
 * the inverse of the flag/descriptor encoding in do_set_thread_area().
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Only the TLS slot range of the GDT may be read. */
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual flag bits from the descriptor. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the user_desc flags word and scattered base/limit. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6268 
/*
 * arch_prctl is not provided to 32-bit (TARGET_ABI32) x86 guests; the
 * 64-bit implementation is in the #else branch below.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6273 #else
6274 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6275 {
6276     abi_long ret = 0;
6277     abi_ulong val;
6278     int idx;
6279 
6280     switch(code) {
6281     case TARGET_ARCH_SET_GS:
6282     case TARGET_ARCH_SET_FS:
6283         if (code == TARGET_ARCH_SET_GS)
6284             idx = R_GS;
6285         else
6286             idx = R_FS;
6287         cpu_x86_load_seg(env, idx, 0);
6288         env->segs[idx].base = addr;
6289         break;
6290     case TARGET_ARCH_GET_GS:
6291     case TARGET_ARCH_GET_FS:
6292         if (code == TARGET_ARCH_GET_GS)
6293             idx = R_GS;
6294         else
6295             idx = R_FS;
6296         val = env->segs[idx].base;
6297         if (put_user(val, addr, abi_ulong))
6298             ret = -TARGET_EFAULT;
6299         break;
6300     default:
6301         ret = -TARGET_EINVAL;
6302         break;
6303     }
6304     return ret;
6305 }
#endif /* defined(TARGET_ABI32) */
6307 #endif /* defined(TARGET_I386) */
6308 
6309 /*
6310  * These constants are generic.  Supply any that are missing from the host.
6311  */
6312 #ifndef PR_SET_NAME
6313 # define PR_SET_NAME    15
6314 # define PR_GET_NAME    16
6315 #endif
6316 #ifndef PR_SET_FP_MODE
6317 # define PR_SET_FP_MODE 45
6318 # define PR_GET_FP_MODE 46
6319 # define PR_FP_MODE_FR   (1 << 0)
6320 # define PR_FP_MODE_FRE  (1 << 1)
6321 #endif
6322 #ifndef PR_SVE_SET_VL
6323 # define PR_SVE_SET_VL  50
6324 # define PR_SVE_GET_VL  51
6325 # define PR_SVE_VL_LEN_MASK  0xffff
6326 # define PR_SVE_VL_INHERIT   (1 << 17)
6327 #endif
6328 #ifndef PR_PAC_RESET_KEYS
6329 # define PR_PAC_RESET_KEYS  54
6330 # define PR_PAC_APIAKEY   (1 << 0)
6331 # define PR_PAC_APIBKEY   (1 << 1)
6332 # define PR_PAC_APDAKEY   (1 << 2)
6333 # define PR_PAC_APDBKEY   (1 << 3)
6334 # define PR_PAC_APGAKEY   (1 << 4)
6335 #endif
6336 #ifndef PR_SET_TAGGED_ADDR_CTRL
6337 # define PR_SET_TAGGED_ADDR_CTRL 55
6338 # define PR_GET_TAGGED_ADDR_CTRL 56
6339 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6340 #endif
6341 #ifndef PR_SET_IO_FLUSHER
6342 # define PR_SET_IO_FLUSHER 57
6343 # define PR_GET_IO_FLUSHER 58
6344 #endif
6345 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6346 # define PR_SET_SYSCALL_USER_DISPATCH 59
6347 #endif
6348 #ifndef PR_SME_SET_VL
6349 # define PR_SME_SET_VL  63
6350 # define PR_SME_GET_VL  64
6351 # define PR_SME_VL_LEN_MASK  0xffff
6352 # define PR_SME_VL_INHERIT   (1 << 17)
6353 #endif
6354 
6355 #include "target_prctl.h"
6356 
/* Fallback for arch-specific prctl helpers that take no argument. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6361 
/* Fallback for arch-specific prctl helpers that take one argument. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6366 
/*
 * Any arch-specific prctl helper that target_prctl.h did not provide
 * falls back to an arity-matched stub returning -TARGET_EINVAL.
 */
#ifndef do_prctl_get_fp_mode
#define do_prctl_get_fp_mode do_prctl_inval0
#endif
#ifndef do_prctl_set_fp_mode
#define do_prctl_set_fp_mode do_prctl_inval1
#endif
#ifndef do_prctl_sve_get_vl
#define do_prctl_sve_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sve_set_vl
#define do_prctl_sve_set_vl do_prctl_inval1
#endif
#ifndef do_prctl_reset_keys
#define do_prctl_reset_keys do_prctl_inval1
#endif
#ifndef do_prctl_set_tagged_addr_ctrl
#define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
#endif
#ifndef do_prctl_get_tagged_addr_ctrl
#define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
#endif
#ifndef do_prctl_get_unalign
#define do_prctl_get_unalign do_prctl_inval1
#endif
#ifndef do_prctl_set_unalign
#define do_prctl_set_unalign do_prctl_inval1
#endif
#ifndef do_prctl_sme_get_vl
#define do_prctl_sme_get_vl do_prctl_inval0
#endif
#ifndef do_prctl_sme_set_vl
#define do_prctl_sme_set_vl do_prctl_inval1
#endif
6400 
/*
 * Implement prctl(option, arg2..arg5) for the guest.
 *
 * Options fall into four groups:
 *  - options with pointer or signal-number arguments, translated here;
 *  - arch-specific options, dispatched to the do_prctl_* helpers;
 *  - a whitelist of pointer-free options forwarded to the host prctl();
 *  - options that could interfere with the emulation itself, rejected
 *    with -TARGET_EINVAL.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* Host signal number must be translated back for the guest. */
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* Task names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
        {
            int val;
            ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
                                  arg3, arg4, arg5));
            if (!is_error(ret) && put_user_s32(val, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }

    case PR_GET_TID_ADDRESS:
        {
            /* Answer from our own bookkeeping, not the host's. */
            TaskState *ts = get_task_state(env_cpu(env));
            return put_user_ual(ts->child_tidptr, arg2);
        }

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6544 
6545 #define NEW_STACK_SIZE 0x40000
6546 
6547 
/* Held by do_fork() while a new thread starts, so setup appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * State handed from do_fork() to the thread created for a CLONE_VM
 * clone, also used for the startup handshake with the parent.
 */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* protects the cond handshake below */
    pthread_cond_t cond;        /* signalled once the child is ready */
    pthread_t thread;
    uint32_t tid;               /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;     /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr;    /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
6559 
/*
 * Thread entry point for CLONE_VM clones created by do_fork().
 * Registers the thread with RCU and TCG, publishes its tid, signals
 * the parent it is ready, then waits on clone_lock until the parent
 * has finished setup before entering the CPU loop.  Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = get_task_state(cpu);
    info->tid = sys_gettid();
    task_settid(ts);
    /* Store our tid where CLONE_CHILD_SETTID/CLONE_PARENT_SETTID asked. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6593 
6594 /* do_fork() Must return host values and target errnos (unlike most
6595    do_*() functions). */
6596 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6597                    abi_ulong parent_tidptr, target_ulong newtls,
6598                    abi_ulong child_tidptr)
6599 {
6600     CPUState *cpu = env_cpu(env);
6601     int ret;
6602     TaskState *ts;
6603     CPUState *new_cpu;
6604     CPUArchState *new_env;
6605     sigset_t sigmask;
6606 
6607     flags &= ~CLONE_IGNORED_FLAGS;
6608 
6609     /* Emulate vfork() with fork() */
6610     if (flags & CLONE_VFORK)
6611         flags &= ~(CLONE_VFORK | CLONE_VM);
6612 
6613     if (flags & CLONE_VM) {
6614         TaskState *parent_ts = get_task_state(cpu);
6615         new_thread_info info;
6616         pthread_attr_t attr;
6617 
6618         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6619             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6620             return -TARGET_EINVAL;
6621         }
6622 
6623         ts = g_new0(TaskState, 1);
6624         init_task_state(ts);
6625 
6626         /* Grab a mutex so that thread setup appears atomic.  */
6627         pthread_mutex_lock(&clone_lock);
6628 
6629         /*
6630          * If this is our first additional thread, we need to ensure we
6631          * generate code for parallel execution and flush old translations.
6632          * Do this now so that the copy gets CF_PARALLEL too.
6633          */
6634         if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
6635             tcg_cflags_set(cpu, CF_PARALLEL);
6636             tb_flush(cpu);
6637         }
6638 
6639         /* we create a new CPU instance. */
6640         new_env = cpu_copy(env);
6641         /* Init regs that differ from the parent.  */
6642         cpu_clone_regs_child(new_env, newsp, flags);
6643         cpu_clone_regs_parent(env, flags);
6644         new_cpu = env_cpu(new_env);
6645         new_cpu->opaque = ts;
6646         ts->bprm = parent_ts->bprm;
6647         ts->info = parent_ts->info;
6648         ts->signal_mask = parent_ts->signal_mask;
6649 
6650         if (flags & CLONE_CHILD_CLEARTID) {
6651             ts->child_tidptr = child_tidptr;
6652         }
6653 
6654         if (flags & CLONE_SETTLS) {
6655             cpu_set_tls (new_env, newtls);
6656         }
6657 
6658         memset(&info, 0, sizeof(info));
6659         pthread_mutex_init(&info.mutex, NULL);
6660         pthread_mutex_lock(&info.mutex);
6661         pthread_cond_init(&info.cond, NULL);
6662         info.env = new_env;
6663         if (flags & CLONE_CHILD_SETTID) {
6664             info.child_tidptr = child_tidptr;
6665         }
6666         if (flags & CLONE_PARENT_SETTID) {
6667             info.parent_tidptr = parent_tidptr;
6668         }
6669 
6670         ret = pthread_attr_init(&attr);
6671         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6672         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6673         /* It is not safe to deliver signals until the child has finished
6674            initializing, so temporarily block all signals.  */
6675         sigfillset(&sigmask);
6676         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6677         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6678 
6679         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6680         /* TODO: Free new CPU state if thread creation failed.  */
6681 
6682         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6683         pthread_attr_destroy(&attr);
6684         if (ret == 0) {
6685             /* Wait for the child to initialize.  */
6686             pthread_cond_wait(&info.cond, &info.mutex);
6687             ret = info.tid;
6688         } else {
6689             ret = -1;
6690         }
6691         pthread_mutex_unlock(&info.mutex);
6692         pthread_cond_destroy(&info.cond);
6693         pthread_mutex_destroy(&info.mutex);
6694         pthread_mutex_unlock(&clone_lock);
6695     } else {
6696         /* if no CLONE_VM, we consider it is a fork */
6697         if (flags & CLONE_INVALID_FORK_FLAGS) {
6698             return -TARGET_EINVAL;
6699         }
6700 
6701         /* We can't support custom termination signals */
6702         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6703             return -TARGET_EINVAL;
6704         }
6705 
6706 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6707         if (flags & CLONE_PIDFD) {
6708             return -TARGET_EINVAL;
6709         }
6710 #endif
6711 
6712         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6713         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6714             return -TARGET_EINVAL;
6715         }
6716 
6717         if (block_signals()) {
6718             return -QEMU_ERESTARTSYS;
6719         }
6720 
6721         fork_start();
6722         ret = fork();
6723         if (ret == 0) {
6724             /* Child Process.  */
6725             cpu_clone_regs_child(env, newsp, flags);
6726             fork_end(ret);
6727             /* There is a race condition here.  The parent process could
6728                theoretically read the TID in the child process before the child
6729                tid is set.  This would require using either ptrace
6730                (not implemented) or having *_tidptr to point at a shared memory
6731                mapping.  We can't repeat the spinlock hack used above because
6732                the child process gets its own copy of the lock.  */
6733             if (flags & CLONE_CHILD_SETTID)
6734                 put_user_u32(sys_gettid(), child_tidptr);
6735             if (flags & CLONE_PARENT_SETTID)
6736                 put_user_u32(sys_gettid(), parent_tidptr);
6737             ts = get_task_state(cpu);
6738             if (flags & CLONE_SETTLS)
6739                 cpu_set_tls (env, newtls);
6740             if (flags & CLONE_CHILD_CLEARTID)
6741                 ts->child_tidptr = child_tidptr;
6742         } else {
6743             cpu_clone_regs_parent(env, flags);
6744             if (flags & CLONE_PIDFD) {
6745                 int pid_fd = 0;
6746 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6747                 int pid_child = ret;
6748                 pid_fd = pidfd_open(pid_child, 0);
6749                 if (pid_fd >= 0) {
6750                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6751                                                | FD_CLOEXEC);
6752                 } else {
6753                         pid_fd = 0;
6754                 }
6755 #endif
6756                 put_user_u32(pid_fd, parent_tidptr);
6757             }
6758             fork_end(ret);
6759         }
6760         g_assert(!cpu_in_exclusive_context(cpu));
6761     }
6762     return ret;
6763 }
6764 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a guest fcntl command number to the host's.  Commands whose
 * values match the host are passed through unchanged; the rest are
 * mapped case by case, with optional commands guarded by #ifdef on the
 * host definition.  Returns -TARGET_EINVAL for unsupported commands.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* The 64-bit lock commands only exist as such on 32-bit ABIs. */
    case TARGET_F_GETLK64:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers have the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK && ret <= F_SETLKW) {
        ret -= F_GETLK - 5;
    }
#endif

    return ret;
}
6871 
/*
 * Expands to the switch cases translating flock lock types; the
 * translation direction is chosen by defining TRANSTBL_CONVERT
 * appropriately before expansion (see the two helpers below).
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6878 
/* Translate a guest F_RDLCK/F_WRLCK/F_UNLCK value to the host's. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* Unknown lock types are rejected. */
    return -TARGET_EINVAL;
}
6886 
/* Translate a host flock lock type back to the guest's encoding. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6897 
6898 static inline abi_long copy_from_user_flock(struct flock *fl,
6899                                             abi_ulong target_flock_addr)
6900 {
6901     struct target_flock *target_fl;
6902     int l_type;
6903 
6904     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6905         return -TARGET_EFAULT;
6906     }
6907 
6908     __get_user(l_type, &target_fl->l_type);
6909     l_type = target_to_host_flock(l_type);
6910     if (l_type < 0) {
6911         return l_type;
6912     }
6913     fl->l_type = l_type;
6914     __get_user(fl->l_whence, &target_fl->l_whence);
6915     __get_user(fl->l_start, &target_fl->l_start);
6916     __get_user(fl->l_len, &target_fl->l_len);
6917     __get_user(fl->l_pid, &target_fl->l_pid);
6918     unlock_user_struct(target_fl, target_flock_addr, 0);
6919     return 0;
6920 }
6921 
6922 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6923                                           const struct flock *fl)
6924 {
6925     struct target_flock *target_fl;
6926     short l_type;
6927 
6928     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6929         return -TARGET_EFAULT;
6930     }
6931 
6932     l_type = host_to_target_flock(fl->l_type);
6933     __put_user(l_type, &target_fl->l_type);
6934     __put_user(fl->l_whence, &target_fl->l_whence);
6935     __put_user(fl->l_start, &target_fl->l_start);
6936     __put_user(fl->l_len, &target_fl->l_len);
6937     __put_user(fl->l_pid, &target_fl->l_pid);
6938     unlock_user_struct(target_fl, target_flock_addr, 1);
6939     return 0;
6940 }
6941 
/* Converter signatures used to select ABI-specific flock64 copy helpers. */
typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
6944 
6945 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Arm OABI layout of struct flock64: QEMU_PACKED suppresses the padding
 * a natural layout would insert after l_whence, matching the guest ABI. */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
6953 
6954 static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
6955                                                    abi_ulong target_flock_addr)
6956 {
6957     struct target_oabi_flock64 *target_fl;
6958     int l_type;
6959 
6960     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6961         return -TARGET_EFAULT;
6962     }
6963 
6964     __get_user(l_type, &target_fl->l_type);
6965     l_type = target_to_host_flock(l_type);
6966     if (l_type < 0) {
6967         return l_type;
6968     }
6969     fl->l_type = l_type;
6970     __get_user(fl->l_whence, &target_fl->l_whence);
6971     __get_user(fl->l_start, &target_fl->l_start);
6972     __get_user(fl->l_len, &target_fl->l_len);
6973     __get_user(fl->l_pid, &target_fl->l_pid);
6974     unlock_user_struct(target_fl, target_flock_addr, 0);
6975     return 0;
6976 }
6977 
6978 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
6979                                                  const struct flock *fl)
6980 {
6981     struct target_oabi_flock64 *target_fl;
6982     short l_type;
6983 
6984     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6985         return -TARGET_EFAULT;
6986     }
6987 
6988     l_type = host_to_target_flock(fl->l_type);
6989     __put_user(l_type, &target_fl->l_type);
6990     __put_user(fl->l_whence, &target_fl->l_whence);
6991     __put_user(fl->l_start, &target_fl->l_start);
6992     __put_user(fl->l_len, &target_fl->l_len);
6993     __put_user(fl->l_pid, &target_fl->l_pid);
6994     unlock_user_struct(target_fl, target_flock_addr, 1);
6995     return 0;
6996 }
6997 #endif
6998 
6999 static inline abi_long copy_from_user_flock64(struct flock *fl,
7000                                               abi_ulong target_flock_addr)
7001 {
7002     struct target_flock64 *target_fl;
7003     int l_type;
7004 
7005     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7006         return -TARGET_EFAULT;
7007     }
7008 
7009     __get_user(l_type, &target_fl->l_type);
7010     l_type = target_to_host_flock(l_type);
7011     if (l_type < 0) {
7012         return l_type;
7013     }
7014     fl->l_type = l_type;
7015     __get_user(fl->l_whence, &target_fl->l_whence);
7016     __get_user(fl->l_start, &target_fl->l_start);
7017     __get_user(fl->l_len, &target_fl->l_len);
7018     __get_user(fl->l_pid, &target_fl->l_pid);
7019     unlock_user_struct(target_fl, target_flock_addr, 0);
7020     return 0;
7021 }
7022 
7023 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7024                                             const struct flock *fl)
7025 {
7026     struct target_flock64 *target_fl;
7027     short l_type;
7028 
7029     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7030         return -TARGET_EFAULT;
7031     }
7032 
7033     l_type = host_to_target_flock(fl->l_type);
7034     __put_user(l_type, &target_fl->l_type);
7035     __put_user(fl->l_whence, &target_fl->l_whence);
7036     __put_user(fl->l_start, &target_fl->l_start);
7037     __put_user(fl->l_len, &target_fl->l_len);
7038     __put_user(fl->l_pid, &target_fl->l_pid);
7039     unlock_user_struct(target_fl, target_flock_addr, 1);
7040     return 0;
7041 }
7042 
7043 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7044 {
7045     struct flock fl;
7046 #ifdef F_GETOWN_EX
7047     struct f_owner_ex fox;
7048     struct target_f_owner_ex *target_fox;
7049 #endif
7050     abi_long ret;
7051     int host_cmd = target_to_host_fcntl_cmd(cmd);
7052 
7053     if (host_cmd == -TARGET_EINVAL)
7054 	    return host_cmd;
7055 
7056     switch(cmd) {
7057     case TARGET_F_GETLK:
7058         ret = copy_from_user_flock(&fl, arg);
7059         if (ret) {
7060             return ret;
7061         }
7062         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7063         if (ret == 0) {
7064             ret = copy_to_user_flock(arg, &fl);
7065         }
7066         break;
7067 
7068     case TARGET_F_SETLK:
7069     case TARGET_F_SETLKW:
7070         ret = copy_from_user_flock(&fl, arg);
7071         if (ret) {
7072             return ret;
7073         }
7074         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7075         break;
7076 
7077     case TARGET_F_GETLK64:
7078     case TARGET_F_OFD_GETLK:
7079         ret = copy_from_user_flock64(&fl, arg);
7080         if (ret) {
7081             return ret;
7082         }
7083         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7084         if (ret == 0) {
7085             ret = copy_to_user_flock64(arg, &fl);
7086         }
7087         break;
7088     case TARGET_F_SETLK64:
7089     case TARGET_F_SETLKW64:
7090     case TARGET_F_OFD_SETLK:
7091     case TARGET_F_OFD_SETLKW:
7092         ret = copy_from_user_flock64(&fl, arg);
7093         if (ret) {
7094             return ret;
7095         }
7096         ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
7097         break;
7098 
7099     case TARGET_F_GETFL:
7100         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7101         if (ret >= 0) {
7102             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7103             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7104             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7105                 ret |= TARGET_O_LARGEFILE;
7106             }
7107         }
7108         break;
7109 
7110     case TARGET_F_SETFL:
7111         ret = get_errno(safe_fcntl(fd, host_cmd,
7112                                    target_to_host_bitmask(arg,
7113                                                           fcntl_flags_tbl)));
7114         break;
7115 
7116 #ifdef F_GETOWN_EX
7117     case TARGET_F_GETOWN_EX:
7118         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7119         if (ret >= 0) {
7120             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7121                 return -TARGET_EFAULT;
7122             target_fox->type = tswap32(fox.type);
7123             target_fox->pid = tswap32(fox.pid);
7124             unlock_user_struct(target_fox, arg, 1);
7125         }
7126         break;
7127 #endif
7128 
7129 #ifdef F_SETOWN_EX
7130     case TARGET_F_SETOWN_EX:
7131         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7132             return -TARGET_EFAULT;
7133         fox.type = tswap32(target_fox->type);
7134         fox.pid = tswap32(target_fox->pid);
7135         unlock_user_struct(target_fox, arg, 0);
7136         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7137         break;
7138 #endif
7139 
7140     case TARGET_F_SETSIG:
7141         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7142         break;
7143 
7144     case TARGET_F_GETSIG:
7145         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7146         break;
7147 
7148     case TARGET_F_SETOWN:
7149     case TARGET_F_GETOWN:
7150     case TARGET_F_SETLEASE:
7151     case TARGET_F_GETLEASE:
7152     case TARGET_F_SETPIPE_SZ:
7153     case TARGET_F_GETPIPE_SZ:
7154     case TARGET_F_ADD_SEALS:
7155     case TARGET_F_GET_SEALS:
7156         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7157         break;
7158 
7159     default:
7160         ret = get_errno(safe_fcntl(fd, cmd, arg));
7161         break;
7162     }
7163     return ret;
7164 }
7165 
7166 #ifdef USE_UID16
7167 
/* Squash a 32-bit uid into the legacy 16-bit range; values that don't
 * fit become the kernel's overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
7175 
/* Squash a 32-bit gid into the legacy 16-bit range; values that don't
 * fit become the kernel's overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
7183 
/* Widen a legacy 16-bit uid: 0xffff is the "no change" sentinel and
 * must become -1; everything else passes through unchanged. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
7191 
/* Widen a legacy 16-bit gid: 0xffff is the "no change" sentinel and
 * must become -1; everything else passes through unchanged. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Target-endianness swap for a 16-bit (USE_UID16) id value. */
static inline int tswapid(int id)
{
    return tswap16(id);
}
7203 
7204 #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7205 
7206 #else /* !USE_UID16 */
/* 32-bit-id build: no narrowing needed. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* 32-bit-id build: no narrowing needed. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* 32-bit-id build: no widening/sentinel handling needed. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* 32-bit-id build: no widening/sentinel handling needed. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* Target-endianness swap for a 32-bit id value. */
static inline int tswapid(int id)
{
    return tswap32(id);
}
7227 
7228 #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7229 
7230 #endif /* USE_UID16 */
7231 
7232 /* We must do direct syscalls for setting UID/GID, because we want to
7233  * implement the Linux system call semantics of "change only for this thread",
7234  * not the libc/POSIX semantics of "change for all threads in process".
7235  * (See http://ewontfix.com/17/ for more details.)
7236  * We use the 32-bit version of the syscalls if present; if it is not
7237  * then either the host architecture supports 32-bit UIDs natively with
7238  * the standard syscall, or the 16-bit UID is the best we can do.
7239  */
7240 #ifdef __NR_setuid32
7241 #define __NR_sys_setuid __NR_setuid32
7242 #else
7243 #define __NR_sys_setuid __NR_setuid
7244 #endif
7245 #ifdef __NR_setgid32
7246 #define __NR_sys_setgid __NR_setgid32
7247 #else
7248 #define __NR_sys_setgid __NR_setgid
7249 #endif
7250 #ifdef __NR_setresuid32
7251 #define __NR_sys_setresuid __NR_setresuid32
7252 #else
7253 #define __NR_sys_setresuid __NR_setresuid
7254 #endif
7255 #ifdef __NR_setresgid32
7256 #define __NR_sys_setresgid __NR_setresgid32
7257 #else
7258 #define __NR_sys_setresgid __NR_setresgid
7259 #endif
7260 #ifdef __NR_setgroups32
7261 #define __NR_sys_setgroups __NR_setgroups32
7262 #else
7263 #define __NR_sys_setgroups __NR_setgroups
7264 #endif
7265 #ifdef __NR_sys_setreuid32
7266 #define __NR_sys_setreuid __NR_setreuid32
7267 #else
7268 #define __NR_sys_setreuid __NR_setreuid
7269 #endif
7270 #ifdef __NR_sys_setregid32
7271 #define __NR_sys_setregid __NR_setregid32
7272 #else
7273 #define __NR_sys_setregid __NR_setregid
7274 #endif
7275 
7276 _syscall1(int, sys_setuid, uid_t, uid)
7277 _syscall1(int, sys_setgid, gid_t, gid)
7278 _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
7279 _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7280 _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
7281 _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
7282 _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
7283 
/*
 * One-time initialisation for syscall emulation: register every thunk
 * struct layout listed in syscall_types.h, then walk the ioctl table
 * patching in argument sizes that could only be computed at runtime.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

/* Expand syscall_types.h once, turning each STRUCT()/STRUCT_SPECIAL()
 * entry into a thunk registration call. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* All-ones size field marks an entry whose size must be
             * derived from its (pointer) argument type descriptor. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7328 
7329 #ifdef TARGET_NR_truncate64
7330 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7331                                          abi_long arg2,
7332                                          abi_long arg3,
7333                                          abi_long arg4)
7334 {
7335     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7336         arg2 = arg3;
7337         arg3 = arg4;
7338     }
7339     return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
7340 }
7341 #endif
7342 
7343 #ifdef TARGET_NR_ftruncate64
7344 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7345                                           abi_long arg2,
7346                                           abi_long arg3,
7347                                           abi_long arg4)
7348 {
7349     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7350         arg2 = arg3;
7351         arg3 = arg4;
7352     }
7353     return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
7354 }
7355 #endif
7356 
7357 #if defined(TARGET_NR_timer_settime) || \
7358     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7359 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7360                                                  abi_ulong target_addr)
7361 {
7362     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7363                                 offsetof(struct target_itimerspec,
7364                                          it_interval)) ||
7365         target_to_host_timespec(&host_its->it_value, target_addr +
7366                                 offsetof(struct target_itimerspec,
7367                                          it_value))) {
7368         return -TARGET_EFAULT;
7369     }
7370 
7371     return 0;
7372 }
7373 #endif
7374 
7375 #if defined(TARGET_NR_timer_settime64) || \
7376     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7377 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7378                                                    abi_ulong target_addr)
7379 {
7380     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7381                                   offsetof(struct target__kernel_itimerspec,
7382                                            it_interval)) ||
7383         target_to_host_timespec64(&host_its->it_value, target_addr +
7384                                   offsetof(struct target__kernel_itimerspec,
7385                                            it_value))) {
7386         return -TARGET_EFAULT;
7387     }
7388 
7389     return 0;
7390 }
7391 #endif
7392 
7393 #if ((defined(TARGET_NR_timerfd_gettime) || \
7394       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7395       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7396 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7397                                                  struct itimerspec *host_its)
7398 {
7399     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7400                                                        it_interval),
7401                                 &host_its->it_interval) ||
7402         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7403                                                        it_value),
7404                                 &host_its->it_value)) {
7405         return -TARGET_EFAULT;
7406     }
7407     return 0;
7408 }
7409 #endif
7410 
7411 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7412       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7413       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7414 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7415                                                    struct itimerspec *host_its)
7416 {
7417     if (host_to_target_timespec64(target_addr +
7418                                   offsetof(struct target__kernel_itimerspec,
7419                                            it_interval),
7420                                   &host_its->it_interval) ||
7421         host_to_target_timespec64(target_addr +
7422                                   offsetof(struct target__kernel_itimerspec,
7423                                            it_value),
7424                                   &host_its->it_value)) {
7425         return -TARGET_EFAULT;
7426     }
7427     return 0;
7428 }
7429 #endif
7430 
7431 #if defined(TARGET_NR_adjtimex) || \
7432     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a guest struct target_timex at @target_addr into the host
 * struct timex @host_tx, field by field (__get_user handles any
 * byte-swapping and width conversion).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7467 
/*
 * Write the host struct timex @host_tx out to a guest struct
 * target_timex at @target_addr, field by field (__put_user handles any
 * byte-swapping and width conversion).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7502 #endif
7503 
7504 
7505 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct target__kernel_timex (64-bit time_t layout) at
 * @target_addr into the host struct timex @host_tx.  The embedded time
 * value is converted separately with copy_from_user_timeval64() before
 * the struct is locked; the remaining fields are read with __get_user.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7544 
7545 static inline abi_long host_to_target_timex64(abi_long target_addr,
7546                                               struct timex *host_tx)
7547 {
7548     struct target__kernel_timex *target_tx;
7549 
7550    if (copy_to_user_timeval64(target_addr +
7551                               offsetof(struct target__kernel_timex, time),
7552                               &host_tx->time)) {
7553         return -TARGET_EFAULT;
7554     }
7555 
7556     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7557         return -TARGET_EFAULT;
7558     }
7559 
7560     __put_user(host_tx->modes, &target_tx->modes);
7561     __put_user(host_tx->offset, &target_tx->offset);
7562     __put_user(host_tx->freq, &target_tx->freq);
7563     __put_user(host_tx->maxerror, &target_tx->maxerror);
7564     __put_user(host_tx->esterror, &target_tx->esterror);
7565     __put_user(host_tx->status, &target_tx->status);
7566     __put_user(host_tx->constant, &target_tx->constant);
7567     __put_user(host_tx->precision, &target_tx->precision);
7568     __put_user(host_tx->tolerance, &target_tx->tolerance);
7569     __put_user(host_tx->tick, &target_tx->tick);
7570     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7571     __put_user(host_tx->jitter, &target_tx->jitter);
7572     __put_user(host_tx->shift, &target_tx->shift);
7573     __put_user(host_tx->stabil, &target_tx->stabil);
7574     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7575     __put_user(host_tx->calcnt, &target_tx->calcnt);
7576     __put_user(host_tx->errcnt, &target_tx->errcnt);
7577     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7578     __put_user(host_tx->tai, &target_tx->tai);
7579 
7580     unlock_user_struct(target_tx, target_addr, 1);
7581     return 0;
7582 }
7583 #endif
7584 
7585 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7586 #define sigev_notify_thread_id _sigev_un._tid
7587 #endif
7588 
/*
 * Convert a guest struct target_sigevent at @target_addr to the host
 * struct sigevent @host_sevp.  Only the fields used here are filled in;
 * presumably callers zero @host_sevp beforehand — TODO confirm at the
 * call sites.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): unlock with copy=1 writes the buffer back to guest
     * memory even though it was locked VERIFY_READ and never modified;
     * copy=0 looks sufficient — confirm before changing. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7614 
7615 #if defined(TARGET_NR_mlockall)
7616 static inline int target_to_host_mlockall_arg(int arg)
7617 {
7618     int result = 0;
7619 
7620     if (arg & TARGET_MCL_CURRENT) {
7621         result |= MCL_CURRENT;
7622     }
7623     if (arg & TARGET_MCL_FUTURE) {
7624         result |= MCL_FUTURE;
7625     }
7626 #ifdef MCL_ONFAULT
7627     if (arg & TARGET_MCL_ONFAULT) {
7628         result |= MCL_ONFAULT;
7629     }
7630 #endif
7631 
7632     return result;
7633 }
7634 #endif
7635 
7636 static inline int target_to_host_msync_arg(abi_long arg)
7637 {
7638     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7639            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7640            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7641            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7642 }
7643 
7644 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7645      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7646      defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat out to the guest's 64-bit stat layout at
 * @target_addr.  On 32-bit Arm the EABI layout differs from the
 * generic one and is handled separately; other targets use
 * target_stat64 (or plain target_stat when no 64-bit variant exists).
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* some layouts also carry a 32-bit __st_ino; fill it too */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* nanosecond timestamp parts, where the host stat provides them */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7719 #endif
7720 
7721 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a struct statx (already in host byte order in @host_stx) out
 * to the guest struct target_statx at @target_addr, byte-swapping each
 * field via __put_user.  The guest struct is zeroed first so reserved
 * and unconverted fields read as 0.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7760 #endif
7761 
/*
 * Invoke the host futex syscall directly (no safe_syscall wrapping),
 * selecting between __NR_futex and __NR_futex_time64 based on the
 * width of the host struct timespec's tv_sec.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Unreachable: at least one of the branches above must exist. */
    g_assert_not_reached();
}
7786 
/*
 * Like do_sys_futex(), but through the safe_syscall wrapper (so a
 * guest signal arriving during the wait is handled correctly) and
 * with the host errno converted to a target error code.
 * Returns -TARGET_ENOSYS if no suitable host futex syscall exists.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7811 
7812 /* ??? Using host futex calls even when target atomic operations
7813    are not really atomic probably breaks things.  However implementing
7814    futexes locally would make futexes shared between multiple processes
7815    tricky.  However they're probably useless because guest atomic
7816    operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex/futex_time64 syscall.  Per-operation
 * preprocessing: VAL is byte-swapped when it will be compared against
 * guest memory, the 4th syscall argument is treated as a timeout or
 * as a plain integer (VAL2) depending on the operation, and guest
 * addresses are translated to host pointers before the host call.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* VAL is compared against the guest word at UADDR: swap it. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        /* These ops take no timeout; ignore whatever the guest passed. */
        timeout = 0;
        break;
    case FUTEX_FD:
        /* VAL is a signal number here, not a compare value. */
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        /* VAL3 is compared against guest memory: swap it. */
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        /* Convert the guest timespec (32- or 64-bit time_t layout). */
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
7884 
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest-supplied handle_bytes
 * field sizes the opaque trailing data; the host result is copied
 * back with handle_bytes/handle_type swapped to guest order and the
 * mount id stored through MOUNT_ID.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
7938 
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): copy the guest file_handle (header
 * plus handle_bytes of opaque data) into a host buffer, fixing up the
 * byte order of handle_bytes/handle_type, and translate the open
 * flags to the host encoding before the host call.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7972 
7973 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7974 
7975 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
7976 {
7977     int host_flags;
7978     target_sigset_t *target_mask;
7979     sigset_t host_mask;
7980     abi_long ret;
7981 
7982     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
7983         return -TARGET_EINVAL;
7984     }
7985     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
7986         return -TARGET_EFAULT;
7987     }
7988 
7989     target_to_host_sigset(&host_mask, target_mask);
7990 
7991     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
7992 
7993     ret = get_errno(signalfd(fd, &host_mask, host_flags));
7994     if (ret >= 0) {
7995         fd_trans_register(ret, &target_signalfd_trans);
7996     }
7997 
7998     unlock_user_struct(target_mask, mask, 0);
7999 
8000     return ret;
8001 }
8002 #endif
8003 
8004 /* Map host to target signal numbers for the wait family of syscalls.
8005    Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Termination signal occupies the low 7 bits; translate it. */
        return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8-15; translate and reassemble. */
        return (host_to_target_signal(WSTOPSIG(status)) << 8)
               | (status & 0xff);
    }
    /* Exited normally (or other): status bits pass through unchanged. */
    return status;
}
8017 
/*
 * Emulate /proc/self/cmdline: write the guest's original argv strings
 * to FD, each including its terminating NUL byte.
 * Returns 0 on success, -1 on a failed or short write.
 */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = get_task_state(cpu)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;  /* include the NUL */

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
8034 
/* Context shared by the open_self_maps_* callbacks below. */
struct open_self_maps_data {
    TaskState *ts;               /* guest task whose mappings are reported */
    IntervalTreeRoot *host_maps; /* parsed host /proc/self/maps, or NULL */
    int fd;                      /* destination fd being written */
    bool smaps;                  /* true: emit smaps format, else maps */
};
8041 
8042 /*
8043  * Subroutine to output one line of /proc/self/maps,
8044  * or one region of /proc/self/smaps.
8045  */
8046 
#ifdef TARGET_HPPA
/*
 * Identify the stack region [S, E) by comparing against the stack
 * limit L.  On HPPA the stack grows upward, so the region's end is
 * compared; on all other targets its start is.
 */
# define test_stack(S, E, L)  (E == L)
#else
# define test_stack(S, E, L)  (S == L)
#endif
8052 
/*
 * Emit one /proc/self/maps line for the guest range [start, end) with
 * protection FLAGS, followed (in smaps mode) by the per-region smaps
 * statistics block.  Special regions (stack/heap/vdso/vsyscall) are
 * labeled like the kernel does.
 */
static void open_self_maps_4(const struct open_self_maps_data *d,
                             const MapInfo *mi, abi_ptr start,
                             abi_ptr end, unsigned flags)
{
    const struct image_info *info = d->ts->info;
    const char *path = mi->path;
    uint64_t offset;
    int fd = d->fd;
    int count;

    if (test_stack(start, end, info->stack_limit)) {
        path = "[stack]";
    } else if (start == info->brk) {
        path = "[heap]";
    } else if (start == info->vdso) {
        path = "[vdso]";
#ifdef TARGET_X86_64
    } else if (start == TARGET_VSYSCALL_PAGE) {
        path = "[vsyscall]";
#endif
    }

    /* Except null device (MAP_ANON), adjust offset for this fragment. */
    offset = mi->offset;
    if (mi->dev) {
        uintptr_t hstart = (uintptr_t)g2h_untagged(start);
        offset += hstart - mi->itree.start;
    }

    /* COUNT is the line length so far, used to pad the path field below. */
    count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
                    start, end,
                    (flags & PAGE_READ) ? 'r' : '-',
                    (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                    (flags & PAGE_EXEC) ? 'x' : '-',
                    mi->is_priv ? 'p' : 's',
                    offset, major(mi->dev), minor(mi->dev),
                    (uint64_t)mi->inode);
    if (path) {
        /* Pad so the path starts at a fixed column. */
        dprintf(fd, "%*s%s\n", 73 - count, "", path);
    } else {
        dprintf(fd, "\n");
    }

    if (d->smaps) {
        unsigned long size = end - start;
        unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
        unsigned long size_kb = size >> 10;

        /* Most counters are not tracked by QEMU and are reported as 0. */
        dprintf(fd, "Size:                  %lu kB\n"
                "KernelPageSize:        %lu kB\n"
                "MMUPageSize:           %lu kB\n"
                "Rss:                   0 kB\n"
                "Pss:                   0 kB\n"
                "Pss_Dirty:             0 kB\n"
                "Shared_Clean:          0 kB\n"
                "Shared_Dirty:          0 kB\n"
                "Private_Clean:         0 kB\n"
                "Private_Dirty:         0 kB\n"
                "Referenced:            0 kB\n"
                "Anonymous:             %lu kB\n"
                "LazyFree:              0 kB\n"
                "AnonHugePages:         0 kB\n"
                "ShmemPmdMapped:        0 kB\n"
                "FilePmdMapped:         0 kB\n"
                "Shared_Hugetlb:        0 kB\n"
                "Private_Hugetlb:       0 kB\n"
                "Swap:                  0 kB\n"
                "SwapPss:               0 kB\n"
                "Locked:                0 kB\n"
                "THPeligible:    0\n"
                "VmFlags:%s%s%s%s%s%s%s%s\n",
                size_kb, page_size_kb, page_size_kb,
                (flags & PAGE_ANON ? size_kb : 0),
                (flags & PAGE_READ) ? " rd" : "",
                (flags & PAGE_WRITE_ORG) ? " wr" : "",
                (flags & PAGE_EXEC) ? " ex" : "",
                mi->is_priv ? "" : " sh",
                (flags & PAGE_READ) ? " mr" : "",
                (flags & PAGE_WRITE_ORG) ? " mw" : "",
                (flags & PAGE_EXEC) ? " me" : "",
                mi->is_priv ? "" : " ms");
    }
}
8137 
8138 /*
8139  * Callback for walk_memory_regions, when read_self_maps() fails.
8140  * Proceed without the benefit of host /proc/self/maps cross-check.
8141  */
static int open_self_maps_3(void *opaque, vaddr guest_start,
                            vaddr guest_end, int flags)
{
    /* Dummy MapInfo: no host path/device/inode information available. */
    static const MapInfo mi = { .is_priv = true };

    open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
    return 0;
}
8150 
8151 /*
8152  * Callback for walk_memory_regions, when read_self_maps() succeeds.
8153  */
/*
 * Emit maps line(s) for one guest region, splitting it wherever the
 * underlying host mapping changes so each fragment carries the right
 * path/device/offset information from the host maps interval tree.
 */
static int open_self_maps_2(void *opaque, vaddr guest_start,
                            vaddr guest_end, int flags)
{
    const struct open_self_maps_data *d = opaque;
    uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
    uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);

#ifdef TARGET_X86_64
    /*
     * Because of the extremely high position of the page within the guest
     * virtual address space, this is not backed by host memory at all.
     * Therefore the loop below would fail.  This is the only instance
     * of not having host backing memory.
     */
    if (guest_start == TARGET_VSYSCALL_PAGE) {
        return open_self_maps_3(opaque, guest_start, guest_end, flags);
    }
#endif

    while (1) {
        /*
         * Every guest page except vsyscall (handled above) is assumed
         * to be host-backed, so the lookup is expected to succeed.
         */
        IntervalTreeNode *n =
            interval_tree_iter_first(d->host_maps, host_start, host_start);
        MapInfo *mi = container_of(n, MapInfo, itree);
        /* Clip this fragment to the end of the current host mapping. */
        uintptr_t this_hlast = MIN(host_last, n->last);
        target_ulong this_gend = h2g(this_hlast) + 1;

        open_self_maps_4(d, mi, guest_start, this_gend, flags);

        if (this_hlast == host_last) {
            return 0;
        }
        /* Continue with the next host mapping covering this region. */
        host_start = this_hlast + 1;
        guest_start = h2g(host_start);
    }
}
8189 
/*
 * Generate the complete /proc/self/maps (or smaps, if SMAPS) content
 * for the guest into FD.  Host mappings are read first so each guest
 * region can be cross-checked against host mapping boundaries; if
 * that fails, guest page flags alone are used.
 */
static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
{
    struct open_self_maps_data d = {
        .ts = get_task_state(env_cpu(env)),
        .fd = fd,
        .smaps = smaps
    };

    /* Hold the mmap lock so the guest layout cannot change mid-walk. */
    mmap_lock();
    d.host_maps = read_self_maps();
    if (d.host_maps) {
        walk_memory_regions(&d, open_self_maps_2);
        free_self_maps(d.host_maps);
    } else {
        walk_memory_regions(&d, open_self_maps_3);
    }
    mmap_unlock();
    return 0;
}
8209 
/* Fill FD with emulated /proc/self/maps content. */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8214 
/* Fill FD with emulated /proc/self/smaps content. */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8219 
8220 static int open_self_stat(CPUArchState *cpu_env, int fd)
8221 {
8222     CPUState *cpu = env_cpu(cpu_env);
8223     TaskState *ts = get_task_state(cpu);
8224     g_autoptr(GString) buf = g_string_new(NULL);
8225     int i;
8226 
8227     for (i = 0; i < 44; i++) {
8228         if (i == 0) {
8229             /* pid */
8230             g_string_printf(buf, FMT_pid " ", getpid());
8231         } else if (i == 1) {
8232             /* app name */
8233             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8234             bin = bin ? bin + 1 : ts->bprm->argv[0];
8235             g_string_printf(buf, "(%.15s) ", bin);
8236         } else if (i == 2) {
8237             /* task state */
8238             g_string_assign(buf, "R "); /* we are running right now */
8239         } else if (i == 3) {
8240             /* ppid */
8241             g_string_printf(buf, FMT_pid " ", getppid());
8242         } else if (i == 4) {
8243             /* pgid */
8244             g_string_printf(buf, FMT_pid " ", getpgrp());
8245         } else if (i == 19) {
8246             /* num_threads */
8247             int cpus = 0;
8248             WITH_RCU_READ_LOCK_GUARD() {
8249                 CPUState *cpu_iter;
8250                 CPU_FOREACH(cpu_iter) {
8251                     cpus++;
8252                 }
8253             }
8254             g_string_printf(buf, "%d ", cpus);
8255         } else if (i == 21) {
8256             /* starttime */
8257             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8258         } else if (i == 27) {
8259             /* stack bottom */
8260             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8261         } else {
8262             /* for the rest, there is MasterCard */
8263             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8264         }
8265 
8266         if (write(fd, buf->str, buf->len) != buf->len) {
8267             return -1;
8268         }
8269     }
8270 
8271     return 0;
8272 }
8273 
/*
 * Emulate /proc/self/auxv: copy the auxiliary vector saved at exec
 * time from the guest stack into FD.  Always returns 0; a failed
 * lock or write yields an empty/partial file.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        /* Rewind so the guest reads the file from the beginning. */
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced by the loop, so this
         * unlocks with a moved pointer and residual length — confirm
         * unlock_user tolerates that (harmless when DEBUG_REMAP is off).
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8303 
/*
 * Return 1 if FILENAME names ENTRY inside this process's own /proc
 * directory — i.e. "/proc/self/ENTRY" or "/proc/<own-pid>/ENTRY" —
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", 6) != 0) {
        return 0;
    }
    p += 6;

    if (strncmp(p, "self/", 5) == 0) {
        p += 5;
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric directory: it must be exactly our own pid. */
        char pid_prefix[80];
        size_t prefix_len;

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        prefix_len = strlen(pid_prefix);
        if (strncmp(p, pid_prefix, prefix_len) != 0) {
            return 0;
        }
        p += prefix_len;
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
8327 
/*
 * Write an exception report (FMT formatted with CODE, executable
 * path, CPU state, and memory map) to LOGFILE.  A NULL LOGFILE is
 * silently ignored.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                      const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
8340 
/*
 * Report a fatal guest exception to stderr and, if logging goes to a
 * separate file, to the QEMU log as well.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        /* trylock may return NULL; excp_dump_file handles that. */
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8354 
8355 #include "target_proc.h"
8356 
8357 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8358     defined(HAVE_ARCH_PROC_CPUINFO) || \
8359     defined(HAVE_ARCH_PROC_HARDWARE)
/* Exact-match comparison used for absolute /proc path entries. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8364 #endif
8365 
8366 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8367 static int open_net_route(CPUArchState *cpu_env, int fd)
8368 {
8369     FILE *fp;
8370     char *line = NULL;
8371     size_t len = 0;
8372     ssize_t read;
8373 
8374     fp = fopen("/proc/net/route", "r");
8375     if (fp == NULL) {
8376         return -1;
8377     }
8378 
8379     /* read header */
8380 
8381     read = getline(&line, &len, fp);
8382     dprintf(fd, "%s", line);
8383 
8384     /* read routes */
8385 
8386     while ((read = getline(&line, &len, fp)) != -1) {
8387         char iface[16];
8388         uint32_t dest, gw, mask;
8389         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8390         int fields;
8391 
8392         fields = sscanf(line,
8393                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8394                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8395                         &mask, &mtu, &window, &irtt);
8396         if (fields != 11) {
8397             continue;
8398         }
8399         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8400                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8401                 metric, tswap32(mask), mtu, window, irtt);
8402     }
8403 
8404     free(line);
8405     fclose(fp);
8406 
8407     return 0;
8408 }
8409 #endif
8410 
/*
 * Intercept opens of /proc files that QEMU must emulate for the guest
 * (self/maps, smaps, stat, auxv, cmdline, exe, plus selected absolute
 * /proc paths).  Emulated content is generated into an anonymous
 * memfd — or an unlinked temp file when memfd_create is unavailable —
 * and that fd is returned.
 *
 * Returns a host fd (or -1 with errno set) when the path was handled
 * here; -2 when it was not and the caller should do a real open.
 */
static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
                              const char *fname, int flags, mode_t mode,
                              int openat2_resolve, bool safe)
{
    g_autofree char *proc_name = NULL;
    const char *pathname;
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_CPUINFO)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_HARDWARE)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* if this is a file from /proc/ filesystem, expand full name */
    proc_name = realpath(fname, NULL);
    if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
        pathname = proc_name;
    } else {
        pathname = fname;
    }

    if (is_proc_myself(pathname, "exe")) {
        /* Honor openat2 resolve flags */
        if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
            (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
            errno = ELOOP;
            return -1;
        }
        /* /proc/self/exe is redirected to the emulated executable. */
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            /* Unlink immediately; the open fd keeps the file alive. */
            unlink(filename);
        }

        /* Generate the emulated content, then rewind for the guest. */
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return -2;
}
8504 
/*
 * Common open path for guest open()/openat(): first try the emulated
 * /proc files via maybe_do_fake_open() (which returns -2 when the
 * path is not handled there), then forward to the host openat(),
 * through the safe_syscall wrapper when SAFE is set.
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
    if (fd > -2) {
        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
8519 
8520 
/*
 * Emulate openat2(2): copy the guest's struct open_how (extensible,
 * GUEST_SIZE bytes), byte-swap its fields and translate the open
 * flags, then either serve the open from the emulated /proc files or
 * forward it to the host.
 */
static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
                      abi_ptr guest_pathname, abi_ptr guest_open_how,
                      abi_ulong guest_size)
{
    struct open_how_ver0 how = {0};
    char *pathname;
    int ret;

    /* The kernel rejects anything smaller than the v0 structure. */
    if (guest_size < sizeof(struct target_open_how_ver0)) {
        return -TARGET_EINVAL;
    }
    ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
    if (ret) {
        if (ret == -TARGET_E2BIG) {
            /* Larger struct with non-zero tail: a version we don't know. */
            qemu_log_mask(LOG_UNIMP,
                          "Unimplemented openat2 open_how size: "
                          TARGET_ABI_FMT_lu "\n", guest_size);
        }
        return ret;
    }
    pathname = lock_user_string(guest_pathname);
    if (!pathname) {
        return -TARGET_EFAULT;
    }

    how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
    how.mode = tswap64(how.mode);
    how.resolve = tswap64(how.resolve);
    /* maybe_do_fake_open returns -2 when the path is not emulated. */
    int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
                                how.resolve, true);
    if (fd > -2) {
        ret = get_errno(fd);
    } else {
        ret = get_errno(safe_openat2(dirfd, pathname, &how,
                                     sizeof(struct open_how_ver0)));
    }

    fd_trans_unregister(ret);
    unlock_user(pathname, guest_pathname, 0);
    return ret;
}
8562 
/*
 * readlink/readlinkat helper: intercepts /proc/self/exe (and the
 * pid-qualified form) to report the emulated executable path;
 * everything else is forwarded to the host readlink().  As with the
 * host syscall, the result placed in BUF is not NUL-terminated.
 */
ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
{
    ssize_t ret;

    if (!pathname || !buf) {
        errno = EFAULT;
        return -1;
    }

    if (!bufsiz) {
        /* Short circuit this for the magic exe check. */
        errno = EINVAL;
        return -1;
    }

    if (is_proc_myself((const char *)pathname, "exe")) {
        /*
         * Don't worry about sign mismatch as earlier mapping
         * logic would have thrown a bad address error.
         */
        ret = MIN(strlen(exec_path), bufsiz);
        /* We cannot NUL terminate the string. */
        memcpy(buf, exec_path, ret);
    } else {
        ret = readlink(path(pathname), buf, bufsiz);
    }

    return ret;
}
8592 
/*
 * Emulate execve(2)/execveat(2).  The guest argv/envp pointer arrays
 * are walked once to count entries and again to lock each string into
 * host memory; /proc/self/exe as the path is rewritten to the
 * emulated executable.  On both success and failure every string
 * locked so far is unlocked before returning.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* Count argv entries up to the NULL terminator. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Count envp entries up to the NULL terminator. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    /* +1 for the terminating NULL pointer in each host array. */
    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Lock each argv string into host memory. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /* Lock each envp string into host memory. */
    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* /proc/self/exe is redirected to the emulated executable. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /* Unlock only the strings actually locked (array entries are
       non-NULL only up to the point where locking stopped). */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8708 
8709 #define TIMER_MAGIC 0x0caf0000
8710 #define TIMER_MAGIC_MASK 0xffff0000
8711 
8712 /* Convert QEMU provided timer ID back to internal 16bit index format */
8713 static target_timer_t get_timer_id(abi_long arg)
8714 {
8715     target_timer_t timerid = arg;
8716 
8717     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8718         return -TARGET_EINVAL;
8719     }
8720 
8721     timerid &= 0xffff;
8722 
8723     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8724         return -TARGET_EINVAL;
8725     }
8726 
8727     return timerid;
8728 }
8729 
8730 static int target_to_host_cpu_mask(unsigned long *host_mask,
8731                                    size_t host_size,
8732                                    abi_ulong target_addr,
8733                                    size_t target_size)
8734 {
8735     unsigned target_bits = sizeof(abi_ulong) * 8;
8736     unsigned host_bits = sizeof(*host_mask) * 8;
8737     abi_ulong *target_mask;
8738     unsigned i, j;
8739 
8740     assert(host_size >= target_size);
8741 
8742     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8743     if (!target_mask) {
8744         return -TARGET_EFAULT;
8745     }
8746     memset(host_mask, 0, host_size);
8747 
8748     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8749         unsigned bit = i * target_bits;
8750         abi_ulong val;
8751 
8752         __get_user(val, &target_mask[i]);
8753         for (j = 0; j < target_bits; j++, bit++) {
8754             if (val & (1UL << j)) {
8755                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8756             }
8757         }
8758     }
8759 
8760     unlock_user(target_mask, target_addr, 0);
8761     return 0;
8762 }
8763 
8764 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8765                                    size_t host_size,
8766                                    abi_ulong target_addr,
8767                                    size_t target_size)
8768 {
8769     unsigned target_bits = sizeof(abi_ulong) * 8;
8770     unsigned host_bits = sizeof(*host_mask) * 8;
8771     abi_ulong *target_mask;
8772     unsigned i, j;
8773 
8774     assert(host_size >= target_size);
8775 
8776     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8777     if (!target_mask) {
8778         return -TARGET_EFAULT;
8779     }
8780 
8781     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8782         unsigned bit = i * target_bits;
8783         abi_ulong val = 0;
8784 
8785         for (j = 0; j < target_bits; j++, bit++) {
8786             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8787                 val |= 1UL << j;
8788             }
8789         }
8790         __put_user(val, &target_mask[i]);
8791     }
8792 
8793     unlock_user(target_mask, target_addr, target_size);
8794     return 0;
8795 }
8796 
8797 #ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): read host directory entries for dirfd into a
 * host bounce buffer, then repack them one record at a time (fixing
 * layout, alignment and byte order) into the guest buffer at arg2,
 * which is 'count' bytes long.
 *
 * Returns the number of bytes stored into the guest buffer, or a
 * negative target errno (-TARGET_ENOMEM, -TARGET_EFAULT, -TARGET_EINVAL
 * if even a single record does not fit, or the host syscall's error).
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;      /* host buffer length; host/target offsets */
    int hreclen, treclen;      /* host/target record lengths */
    off_t prev_diroff = 0;     /* d_off of the last record passed on */

    /* Host-side bounce buffer, same size as the guest buffer. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records and emit target records in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: the name's NUL terminator plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8882 #endif /* TARGET_NR_getdents */
8883 
8884 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): read host directory entries for dirfd into a
 * host bounce buffer, then repack them into target_dirent64 records
 * (fixing alignment and byte order) in the guest buffer at arg2, which
 * is 'count' bytes long.
 *
 * Returns the number of bytes stored into the guest buffer, or a
 * negative target errno (-TARGET_ENOMEM, -TARGET_EFAULT, -TARGET_EINVAL
 * if even a single record does not fit, or the host syscall's error).
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;      /* host buffer length; host/target offsets */
    int hreclen, treclen;      /* host/target record lengths */
    off_t prev_diroff = 0;     /* d_off of the last record passed on */

    /* Host-side bounce buffer, same size as the guest buffer. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records and emit target records in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the NUL terminator here. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8948 #endif /* TARGET_NR_getdents64 */
8949 
8950 #if defined(TARGET_NR_riscv_hwprobe)
8951 
/*
 * riscv_hwprobe key and value constants.  These are part of the guest
 * ABI (Linux uapi <asm/hwprobe.h> values) and must not be renumbered.
 */
#define RISCV_HWPROBE_KEY_MVENDORID     0
#define RISCV_HWPROBE_KEY_MARCHID       1
#define RISCV_HWPROBE_KEY_MIMPID        2

#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)

#define RISCV_HWPROBE_KEY_IMA_EXT_0         4
#define     RISCV_HWPROBE_IMA_FD            (1 << 0)
#define     RISCV_HWPROBE_IMA_C             (1 << 1)
#define     RISCV_HWPROBE_IMA_V             (1 << 2)
#define     RISCV_HWPROBE_EXT_ZBA           (1 << 3)
#define     RISCV_HWPROBE_EXT_ZBB           (1 << 4)
#define     RISCV_HWPROBE_EXT_ZBS           (1 << 5)
#define     RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
#define     RISCV_HWPROBE_EXT_ZBC           (1 << 7)
#define     RISCV_HWPROBE_EXT_ZBKB          (1 << 8)
#define     RISCV_HWPROBE_EXT_ZBKC          (1 << 9)
#define     RISCV_HWPROBE_EXT_ZBKX          (1 << 10)
#define     RISCV_HWPROBE_EXT_ZKND          (1 << 11)
#define     RISCV_HWPROBE_EXT_ZKNE          (1 << 12)
#define     RISCV_HWPROBE_EXT_ZKNH          (1 << 13)
#define     RISCV_HWPROBE_EXT_ZKSED         (1 << 14)
#define     RISCV_HWPROBE_EXT_ZKSH          (1 << 15)
#define     RISCV_HWPROBE_EXT_ZKT           (1 << 16)
#define     RISCV_HWPROBE_EXT_ZVBB          (1 << 17)
#define     RISCV_HWPROBE_EXT_ZVBC          (1 << 18)
#define     RISCV_HWPROBE_EXT_ZVKB          (1 << 19)
#define     RISCV_HWPROBE_EXT_ZVKG          (1 << 20)
#define     RISCV_HWPROBE_EXT_ZVKNED        (1 << 21)
#define     RISCV_HWPROBE_EXT_ZVKNHA        (1 << 22)
#define     RISCV_HWPROBE_EXT_ZVKNHB        (1 << 23)
#define     RISCV_HWPROBE_EXT_ZVKSED        (1 << 24)
#define     RISCV_HWPROBE_EXT_ZVKSH         (1 << 25)
#define     RISCV_HWPROBE_EXT_ZVKT          (1 << 26)
#define     RISCV_HWPROBE_EXT_ZFH           (1 << 27)
#define     RISCV_HWPROBE_EXT_ZFHMIN        (1 << 28)
#define     RISCV_HWPROBE_EXT_ZIHINTNTL     (1 << 29)
#define     RISCV_HWPROBE_EXT_ZVFH          (1 << 30)
/* Flags from bit 31 up need a 64-bit constant to avoid int overflow. */
#define     RISCV_HWPROBE_EXT_ZVFHMIN       (1ULL << 31)
#define     RISCV_HWPROBE_EXT_ZFA           (1ULL << 32)
#define     RISCV_HWPROBE_EXT_ZTSO          (1ULL << 33)
#define     RISCV_HWPROBE_EXT_ZACAS         (1ULL << 34)
#define     RISCV_HWPROBE_EXT_ZICOND        (1ULL << 35)

#define RISCV_HWPROBE_KEY_CPUPERF_0     5
/* CPUPERF_0 values are a small enum in the low bits, not single flags. */
#define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
#define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
#define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
#define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
#define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)

#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
9006 
/* One key/value pair as exchanged with the guest by riscv_hwprobe. */
struct riscv_hwprobe {
    abi_llong  key;    /* probe key; rewritten to -1 if unrecognized */
    abi_ullong value;  /* probed value for the key (0 by default) */
};
9011 
/*
 * Resolve a guest-supplied array of riscv_hwprobe key/value pairs in
 * place: for each recognized key, store a value derived from the
 * emulated CPU's configuration; for unrecognized keys, rewrite the key
 * field itself to -1.
 */
static void risc_hwprobe_fill_pairs(CPURISCVState *env,
                                    struct riscv_hwprobe *pair,
                                    size_t pair_count)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    for (; pair_count > 0; pair_count--, pair++) {
        abi_llong key;
        abi_ullong value;
        /* Default the reported value to 0 before examining the key. */
        __put_user(0, &pair->value);
        __get_user(key, &pair->key);
        switch (key) {
        case RISCV_HWPROBE_KEY_MVENDORID:
            __put_user(cfg->mvendorid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MARCHID:
            __put_user(cfg->marchid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MIMPID:
            __put_user(cfg->mimpid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
            /* IMA base behaviour requires the I, M and A extensions. */
            value = riscv_has_ext(env, RVI) &&
                    riscv_has_ext(env, RVM) &&
                    riscv_has_ext(env, RVA) ?
                    RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_IMA_EXT_0:
            /* Accumulate one flag bit per supported ISA extension. */
            value = riscv_has_ext(env, RVF) &&
                    riscv_has_ext(env, RVD) ?
                    RISCV_HWPROBE_IMA_FD : 0;
            value |= riscv_has_ext(env, RVC) ?
                     RISCV_HWPROBE_IMA_C : 0;
            value |= riscv_has_ext(env, RVV) ?
                     RISCV_HWPROBE_IMA_V : 0;
            value |= cfg->ext_zba ?
                     RISCV_HWPROBE_EXT_ZBA : 0;
            value |= cfg->ext_zbb ?
                     RISCV_HWPROBE_EXT_ZBB : 0;
            value |= cfg->ext_zbs ?
                     RISCV_HWPROBE_EXT_ZBS : 0;
            value |= cfg->ext_zicboz ?
                     RISCV_HWPROBE_EXT_ZICBOZ : 0;
            value |= cfg->ext_zbc ?
                     RISCV_HWPROBE_EXT_ZBC : 0;
            value |= cfg->ext_zbkb ?
                     RISCV_HWPROBE_EXT_ZBKB : 0;
            value |= cfg->ext_zbkc ?
                     RISCV_HWPROBE_EXT_ZBKC : 0;
            value |= cfg->ext_zbkx ?
                     RISCV_HWPROBE_EXT_ZBKX : 0;
            value |= cfg->ext_zknd ?
                     RISCV_HWPROBE_EXT_ZKND : 0;
            value |= cfg->ext_zkne ?
                     RISCV_HWPROBE_EXT_ZKNE : 0;
            value |= cfg->ext_zknh ?
                     RISCV_HWPROBE_EXT_ZKNH : 0;
            value |= cfg->ext_zksed ?
                     RISCV_HWPROBE_EXT_ZKSED : 0;
            value |= cfg->ext_zksh ?
                     RISCV_HWPROBE_EXT_ZKSH : 0;
            value |= cfg->ext_zkt ?
                     RISCV_HWPROBE_EXT_ZKT : 0;
            value |= cfg->ext_zvbb ?
                     RISCV_HWPROBE_EXT_ZVBB : 0;
            value |= cfg->ext_zvbc ?
                     RISCV_HWPROBE_EXT_ZVBC : 0;
            value |= cfg->ext_zvkb ?
                     RISCV_HWPROBE_EXT_ZVKB : 0;
            value |= cfg->ext_zvkg ?
                     RISCV_HWPROBE_EXT_ZVKG : 0;
            value |= cfg->ext_zvkned ?
                     RISCV_HWPROBE_EXT_ZVKNED : 0;
            value |= cfg->ext_zvknha ?
                     RISCV_HWPROBE_EXT_ZVKNHA : 0;
            value |= cfg->ext_zvknhb ?
                     RISCV_HWPROBE_EXT_ZVKNHB : 0;
            value |= cfg->ext_zvksed ?
                     RISCV_HWPROBE_EXT_ZVKSED : 0;
            value |= cfg->ext_zvksh ?
                     RISCV_HWPROBE_EXT_ZVKSH : 0;
            value |= cfg->ext_zvkt ?
                     RISCV_HWPROBE_EXT_ZVKT : 0;
            value |= cfg->ext_zfh ?
                     RISCV_HWPROBE_EXT_ZFH : 0;
            value |= cfg->ext_zfhmin ?
                     RISCV_HWPROBE_EXT_ZFHMIN : 0;
            value |= cfg->ext_zihintntl ?
                     RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
            value |= cfg->ext_zvfh ?
                     RISCV_HWPROBE_EXT_ZVFH : 0;
            value |= cfg->ext_zvfhmin ?
                     RISCV_HWPROBE_EXT_ZVFHMIN : 0;
            value |= cfg->ext_zfa ?
                     RISCV_HWPROBE_EXT_ZFA : 0;
            value |= cfg->ext_ztso ?
                     RISCV_HWPROBE_EXT_ZTSO : 0;
            value |= cfg->ext_zacas ?
                     RISCV_HWPROBE_EXT_ZACAS : 0;
            value |= cfg->ext_zicond ?
                     RISCV_HWPROBE_EXT_ZICOND : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_CPUPERF_0:
            /* Misaligned accesses are always reported as fast. */
            __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
            /* Block size is only meaningful when Zicboz is present. */
            value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
            __put_user(value, &pair->value);
            break;
        default:
            /* Unknown key: signal it to the guest by storing -1 as the key. */
            __put_user(-1, &pair->key);
            break;
        }
    }
}
9129 
9130 /*
9131  * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
 * If the cpumask_t has no bits set: -EINVAL.
9133  * Otherwise the cpumask_t contains some bit set: 0.
9134  * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
9135  * nor bound the search by cpumask_size().
9136  */
9137 static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
9138 {
9139     unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
9140     int ret = -TARGET_EFAULT;
9141 
9142     if (p) {
9143         ret = -TARGET_EINVAL;
9144         /*
9145          * Since we only care about the empty/non-empty state of the cpumask_t
9146          * not the individual bits, we do not need to repartition the bits
9147          * from target abi_ulong to host unsigned long.
9148          *
9149          * Note that the kernel does not round up cpusetsize to a multiple of
9150          * sizeof(abi_ulong).  After bounding cpusetsize by cpumask_size(),
9151          * it copies exactly cpusetsize bytes into a zeroed buffer.
9152          */
9153         for (abi_ulong i = 0; i < cpusetsize; ++i) {
9154             if (p[i]) {
9155                 ret = 0;
9156                 break;
9157             }
9158         }
9159         unlock_user(p, target_cpus, 0);
9160     }
9161     return ret;
9162 }
9163 
9164 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9165                                  abi_long arg2, abi_long arg3,
9166                                  abi_long arg4, abi_long arg5)
9167 {
9168     int ret;
9169     struct riscv_hwprobe *host_pairs;
9170 
9171     /* flags must be 0 */
9172     if (arg5 != 0) {
9173         return -TARGET_EINVAL;
9174     }
9175 
9176     /* check cpu_set */
9177     if (arg3 != 0) {
9178         ret = nonempty_cpu_set(arg3, arg4);
9179         if (ret != 0) {
9180             return ret;
9181         }
9182     } else if (arg4 != 0) {
9183         return -TARGET_EINVAL;
9184     }
9185 
9186     /* no pairs */
9187     if (arg2 == 0) {
9188         return 0;
9189     }
9190 
9191     host_pairs = lock_user(VERIFY_WRITE, arg1,
9192                            sizeof(*host_pairs) * (size_t)arg2, 0);
9193     if (host_pairs == NULL) {
9194         return -TARGET_EFAULT;
9195     }
9196     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9197     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9198     return 0;
9199 }
9200 #endif /* TARGET_NR_riscv_hwprobe */
9201 
/* Direct host syscall wrappers generated via the _syscall* macros. */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
           int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
9217 
9218 /* This is an internal helper for do_syscall so that it is easier
9219  * to have a single return point, so that actions, such as logging
9220  * of syscall results, can be performed.
9221  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9222  */
9223 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9224                             abi_long arg2, abi_long arg3, abi_long arg4,
9225                             abi_long arg5, abi_long arg6, abi_long arg7,
9226                             abi_long arg8)
9227 {
9228     CPUState *cpu = env_cpu(cpu_env);
9229     abi_long ret;
9230 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9231     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9232     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9233     || defined(TARGET_NR_statx)
9234     struct stat st;
9235 #endif
9236 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9237     || defined(TARGET_NR_fstatfs)
9238     struct statfs stfs;
9239 #endif
9240     void *p;
9241 
9242     switch(num) {
9243     case TARGET_NR_exit:
9244         /* In old applications this may be used to implement _exit(2).
9245            However in threaded applications it is used for thread termination,
9246            and _exit_group is used for application termination.
9247            Do thread termination if we have more then one thread.  */
9248 
9249         if (block_signals()) {
9250             return -QEMU_ERESTARTSYS;
9251         }
9252 
9253         pthread_mutex_lock(&clone_lock);
9254 
9255         if (CPU_NEXT(first_cpu)) {
9256             TaskState *ts = get_task_state(cpu);
9257 
9258             if (ts->child_tidptr) {
9259                 put_user_u32(0, ts->child_tidptr);
9260                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9261                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9262             }
9263 
9264             object_unparent(OBJECT(cpu));
9265             object_unref(OBJECT(cpu));
9266             /*
9267              * At this point the CPU should be unrealized and removed
9268              * from cpu lists. We can clean-up the rest of the thread
9269              * data without the lock held.
9270              */
9271 
9272             pthread_mutex_unlock(&clone_lock);
9273 
9274             thread_cpu = NULL;
9275             g_free(ts);
9276             rcu_unregister_thread();
9277             pthread_exit(NULL);
9278         }
9279 
9280         pthread_mutex_unlock(&clone_lock);
9281         preexit_cleanup(cpu_env, arg1);
9282         _exit(arg1);
9283         return 0; /* avoid warning */
9284     case TARGET_NR_read:
9285         if (arg2 == 0 && arg3 == 0) {
9286             return get_errno(safe_read(arg1, 0, 0));
9287         } else {
9288             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9289                 return -TARGET_EFAULT;
9290             ret = get_errno(safe_read(arg1, p, arg3));
9291             if (ret >= 0 &&
9292                 fd_trans_host_to_target_data(arg1)) {
9293                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9294             }
9295             unlock_user(p, arg2, ret);
9296         }
9297         return ret;
9298     case TARGET_NR_write:
9299         if (arg2 == 0 && arg3 == 0) {
9300             return get_errno(safe_write(arg1, 0, 0));
9301         }
9302         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9303             return -TARGET_EFAULT;
9304         if (fd_trans_target_to_host_data(arg1)) {
9305             void *copy = g_malloc(arg3);
9306             memcpy(copy, p, arg3);
9307             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9308             if (ret >= 0) {
9309                 ret = get_errno(safe_write(arg1, copy, ret));
9310             }
9311             g_free(copy);
9312         } else {
9313             ret = get_errno(safe_write(arg1, p, arg3));
9314         }
9315         unlock_user(p, arg2, 0);
9316         return ret;
9317 
9318 #ifdef TARGET_NR_open
9319     case TARGET_NR_open:
9320         if (!(p = lock_user_string(arg1)))
9321             return -TARGET_EFAULT;
9322         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9323                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9324                                   arg3, true));
9325         fd_trans_unregister(ret);
9326         unlock_user(p, arg1, 0);
9327         return ret;
9328 #endif
9329     case TARGET_NR_openat:
9330         if (!(p = lock_user_string(arg2)))
9331             return -TARGET_EFAULT;
9332         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9333                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9334                                   arg4, true));
9335         fd_trans_unregister(ret);
9336         unlock_user(p, arg2, 0);
9337         return ret;
9338     case TARGET_NR_openat2:
9339         ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
9340         return ret;
9341 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9342     case TARGET_NR_name_to_handle_at:
9343         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9344         return ret;
9345 #endif
9346 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9347     case TARGET_NR_open_by_handle_at:
9348         ret = do_open_by_handle_at(arg1, arg2, arg3);
9349         fd_trans_unregister(ret);
9350         return ret;
9351 #endif
9352 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9353     case TARGET_NR_pidfd_open:
9354         return get_errno(pidfd_open(arg1, arg2));
9355 #endif
9356 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9357     case TARGET_NR_pidfd_send_signal:
9358         {
9359             siginfo_t uinfo, *puinfo;
9360 
9361             if (arg3) {
9362                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9363                 if (!p) {
9364                     return -TARGET_EFAULT;
9365                  }
9366                  target_to_host_siginfo(&uinfo, p);
9367                  unlock_user(p, arg3, 0);
9368                  puinfo = &uinfo;
9369             } else {
9370                  puinfo = NULL;
9371             }
9372             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9373                                               puinfo, arg4));
9374         }
9375         return ret;
9376 #endif
9377 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9378     case TARGET_NR_pidfd_getfd:
9379         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9380 #endif
9381     case TARGET_NR_close:
9382         fd_trans_unregister(arg1);
9383         return get_errno(close(arg1));
9384 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9385     case TARGET_NR_close_range:
9386         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9387         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9388             abi_long fd, maxfd;
9389             maxfd = MIN(arg2, target_fd_max);
9390             for (fd = arg1; fd < maxfd; fd++) {
9391                 fd_trans_unregister(fd);
9392             }
9393         }
9394         return ret;
9395 #endif
9396 
9397     case TARGET_NR_brk:
9398         return do_brk(arg1);
9399 #ifdef TARGET_NR_fork
9400     case TARGET_NR_fork:
9401         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9402 #endif
9403 #ifdef TARGET_NR_waitpid
9404     case TARGET_NR_waitpid:
9405         {
9406             int status;
9407             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9408             if (!is_error(ret) && arg2 && ret
9409                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9410                 return -TARGET_EFAULT;
9411         }
9412         return ret;
9413 #endif
9414 #ifdef TARGET_NR_waitid
9415     case TARGET_NR_waitid:
9416         {
9417             struct rusage ru;
9418             siginfo_t info;
9419 
9420             ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
9421                                         arg4, (arg5 ? &ru : NULL)));
9422             if (!is_error(ret)) {
9423                 if (arg3) {
9424                     p = lock_user(VERIFY_WRITE, arg3,
9425                                   sizeof(target_siginfo_t), 0);
9426                     if (!p) {
9427                         return -TARGET_EFAULT;
9428                     }
9429                     host_to_target_siginfo(p, &info);
9430                     unlock_user(p, arg3, sizeof(target_siginfo_t));
9431                 }
9432                 if (arg5 && host_to_target_rusage(arg5, &ru)) {
9433                     return -TARGET_EFAULT;
9434                 }
9435             }
9436         }
9437         return ret;
9438 #endif
9439 #ifdef TARGET_NR_creat /* not on alpha */
9440     case TARGET_NR_creat:
9441         if (!(p = lock_user_string(arg1)))
9442             return -TARGET_EFAULT;
9443         ret = get_errno(creat(p, arg2));
9444         fd_trans_unregister(ret);
9445         unlock_user(p, arg1, 0);
9446         return ret;
9447 #endif
9448 #ifdef TARGET_NR_link
9449     case TARGET_NR_link:
9450         {
9451             void * p2;
9452             p = lock_user_string(arg1);
9453             p2 = lock_user_string(arg2);
9454             if (!p || !p2)
9455                 ret = -TARGET_EFAULT;
9456             else
9457                 ret = get_errno(link(p, p2));
9458             unlock_user(p2, arg2, 0);
9459             unlock_user(p, arg1, 0);
9460         }
9461         return ret;
9462 #endif
9463 #if defined(TARGET_NR_linkat)
9464     case TARGET_NR_linkat:
9465         {
9466             void * p2 = NULL;
9467             if (!arg2 || !arg4)
9468                 return -TARGET_EFAULT;
9469             p  = lock_user_string(arg2);
9470             p2 = lock_user_string(arg4);
9471             if (!p || !p2)
9472                 ret = -TARGET_EFAULT;
9473             else
9474                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9475             unlock_user(p, arg2, 0);
9476             unlock_user(p2, arg4, 0);
9477         }
9478         return ret;
9479 #endif
9480 #ifdef TARGET_NR_unlink
9481     case TARGET_NR_unlink:
9482         if (!(p = lock_user_string(arg1)))
9483             return -TARGET_EFAULT;
9484         ret = get_errno(unlink(p));
9485         unlock_user(p, arg1, 0);
9486         return ret;
9487 #endif
9488 #if defined(TARGET_NR_unlinkat)
9489     case TARGET_NR_unlinkat:
9490         if (!(p = lock_user_string(arg2)))
9491             return -TARGET_EFAULT;
9492         ret = get_errno(unlinkat(arg1, p, arg3));
9493         unlock_user(p, arg2, 0);
9494         return ret;
9495 #endif
9496     case TARGET_NR_execveat:
9497         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9498     case TARGET_NR_execve:
9499         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9500     case TARGET_NR_chdir:
9501         if (!(p = lock_user_string(arg1)))
9502             return -TARGET_EFAULT;
9503         ret = get_errno(chdir(p));
9504         unlock_user(p, arg1, 0);
9505         return ret;
#ifdef TARGET_NR_time
    case TARGET_NR_time:
        {
            time_t host_time;
            /* time() returns the current time; ret carries it back to the
             * guest as the syscall result. */
            ret = get_errno(time(&host_time));
            /* The out-pointer argument is optional: store the time there
             * only when arg1 is non-NULL, faulting if the store fails. */
            if (!is_error(ret)
                && arg1
                && put_user_sal(host_time, arg1))
                return -TARGET_EFAULT;
        }
        return ret;
#endif
#ifdef TARGET_NR_mknod
    case TARGET_NR_mknod:
        /* mknod(path, mode, dev): only the path needs guest->host copying. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mknod(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mknodat)
    case TARGET_NR_mknodat:
        /* mknodat(dirfd, path, mode, dev). */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mknodat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_chmod
    case TARGET_NR_chmod:
        /* chmod(path, mode). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chmod(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_lseek
    case TARGET_NR_lseek:
        /* All three arguments pass through unchanged. */
        return get_errno(lseek(arg1, arg2, arg3));
#endif
#if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxpid:
        /* Alpha's getxpid returns the pid as the syscall result and the
         * parent pid in register a4. */
        cpu_env->ir[IR_A4] = getppid();
        return get_errno(getpid());
#endif
#ifdef TARGET_NR_getpid
    case TARGET_NR_getpid:
        return get_errno(getpid());
#endif
    case TARGET_NR_mount:
        {
            /* need to look at the data field */
            void *p2, *p3;

            /* source (arg1), target (arg2) and fstype (arg3) are guest
             * strings; source and fstype may legitimately be NULL for
             * some mount types, target is mandatory. */
            if (arg1) {
                p = lock_user_string(arg1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
            } else {
                p = NULL;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                if (arg1) {
                    unlock_user(p, arg1, 0);
                }
                return -TARGET_EFAULT;
            }

            if (arg3) {
                p3 = lock_user_string(arg3);
                if (!p3) {
                    if (arg1) {
                        unlock_user(p, arg1, 0);
                    }
                    unlock_user(p2, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                p3 = NULL;
            }

            /* FIXME - arg5 should be locked, but it isn't clear how to
             * do that since it's not guaranteed to be a NULL-terminated
             * string.
             */
            if (!arg5) {
                ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
            } else {
                /* data blob is passed through via direct guest->host
                 * address translation (see FIXME above). */
                ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
            }
            ret = get_errno(ret);

            /* Unlock in the same optional/mandatory pattern used above. */
            if (arg1) {
                unlock_user(p, arg1, 0);
            }
            unlock_user(p2, arg2, 0);
            if (arg3) {
                unlock_user(p3, arg3, 0);
            }
        }
        return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        /* Both spellings of the old umount(target) syscall share one
         * implementation: copy in the path and call host umount(). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            void *p2, *p4;

            /* move_mount(from_dfd, from_path, to_dfd, to_path, flags):
             * both paths are required. */
            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);

            return ret;
        }
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            /* O_CLOEXEC is the only flag whose value can differ between
             * target and host, so translate just that bit. */
            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* stime(t) is emulated via clock_settime(CLOCK_REALTIME) with
             * the seconds value read from guest memory. */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm() cannot fail; its return value is the seconds remaining. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /* Wait for a signal using the task's current signal mask; pause
         * always reports -EINTR once a handler has run. */
        if (!block_signals()) {
            sigsuspend(&get_task_state(cpu)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            /* The times buffer is optional (NULL means "set to now"). */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                /* Byte-swap the two abi_long timestamps to host order. */
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            /* Optional pair of target timevals: [0] = access, [1] = modify. */
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            /* Optional pair of target timevals, as for utimes above. */
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's sysroot path redirection (-L prefix). */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies QEMU's sysroot path redirection (-L prefix). */
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* The guest faccessat syscall has no flags argument, so host
         * flags are always 0 here; arg3 is the access mode. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* faccessat2 does take flags (arg4), passed through unchanged. */
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync() never fails and returns void. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Signal numbers differ between targets; translate before killing. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            /* rename(oldpath, newpath): copy in both guest paths. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* renameat(olddirfd, oldpath, newdirfd, newpath). */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /* renameat2 adds a flags argument (arg5); it goes through the
             * sys_renameat2 wrapper rather than a libc call. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* mkdir(path, mode). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* mkdirat(dirfd, path, mode). */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        /* rmdir(path). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Mirror any fd translation state onto the new descriptor. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        /* Old-style pipe: result delivery is target-specific, handled
         * inside do_pipe(). */
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2 flags (O_CLOEXEC/O_NONBLOCK) need target->host mapping. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
9888     case TARGET_NR_times:
9889         {
9890             struct target_tms *tmsp;
9891             struct tms tms;
9892             ret = get_errno(times(&tms));
9893             if (arg1) {
9894                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9895                 if (!tmsp)
9896                     return -TARGET_EFAULT;
9897                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9898                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9899                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9900                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9901             }
9902             if (!is_error(ret))
9903                 ret = host_to_target_clock_t(ret);
9904         }
9905         return ret;
    case TARGET_NR_acct:
        /* acct(NULL) disables accounting, so a zero guest pointer is a
         * valid argument here, not a fault. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's sysroot path redirection (-L prefix). */
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        /* umount2(target, flags). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* ioctl argument marshalling is table-driven inside do_ioctl(). */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        /* fcntl command/flag translation is handled inside do_fcntl(). */
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        /* chroot(path). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Mirror any fd translation state onto the new descriptor. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* dup3 accepts only O_CLOEXEC.  Bug fix: return the *target*
         * errno constant, consistent with every other error return in
         * this switch — the host's EINVAL value is not guaranteed to
         * match the guest's. */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Mirror any fd translation state onto the new descriptor,
             * as done for dup/dup2 above. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Old-style sigaction.  MIPS uses the full target_sigaction
             * layout with a 4-word sa_mask; other targets use the
             * old_sigaction layout with a single-word mask. */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		/* Only the first mask word is significant for the old ABI. */
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		/* Zero the unused upper mask words in the returned struct. */
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel requires an exact sigset size for rt_sigaction. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* act was only read; unlock it regardless of the outcome. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Old-style "get signal mask": query via do_sigprocmask and
             * return the mask itself as the syscall result. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Old-style "set signal mask": install the new mask and return
             * the previous one as the syscall result. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /* Old-style sigprocmask.  Alpha passes the mask by value and
             * returns the old mask as the syscall result; other targets
             * pass pointers to old-style sigsets in guest memory. */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            /* The new-mask pointer is optional: NULL means "query only",
             * in which case 'how' is ignored by the kernel. */
            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            /* Write the previous mask back only if the caller asked for it. */
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* rt_sigprocmask requires an exact sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            /* The new-mask pointer is optional: NULL means "query only". */
            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            /* Write the previous mask back only if the caller asked for it. */
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: write the pending set back using the
             * old (single-word) sigset conversion. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value in arg1 rather than via a
             * guest pointer. */
            TaskState *ts = get_task_state(cpu);
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            /* Restore/clean up the temporarily-installed suspend mask. */
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            /* Unlike old sigsuspend, the sigset size comes from the guest
             * (arg2) and is validated inside process_sigsuspend_mask. */
            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* rt_sigtimedwait requires an exact sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            /* The timeout is optional: NULL blocks indefinitely. */
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                /* Optionally copy the siginfo back, then translate the
                 * delivered signal number to the target's numbering. */
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Identical to rt_sigtimedwait except the timeout uses the
             * 64-bit timespec layout. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* rt_sigqueueinfo(pid, sig, siginfo): convert the guest siginfo
             * and the signal number before queuing. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Thread-directed variant: (tgid, tid, sig, siginfo). */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Return from a classic (non-rt) signal handler frame. */
        if (block_signals()) {
            /* Signals could not be blocked yet; -QEMU_ERESTARTSYS asks
             * for the syscall to be restarted. */
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        /* Return from an rt signal handler frame. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* arg1: guest pointer to the new hostname, arg2: its length. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        /* arg1: resource id (target encoding), arg2: guest rlimit struct. */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory-related limit: pretend success, apply nothing. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        /* arg1: resource id, arg2: guest struct to receive the limits. */
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        /* arg1: who (RUSAGE_*), arg2: guest rusage struct to fill. */
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        /* arg1: optional guest timeval out, arg2: optional guest timezone out. */
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Either output pointer may be NULL; copy only what was asked. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        /* arg1: optional guest timeval, arg2: optional guest timezone. */
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            /* NULL pointers are passed through so the host applies defaults. */
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
        /* Three historical flavours of select, chosen per target arch. */
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* old_select: a single guest pointer to an argument block. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* Final flag selects 32-bit (false) vs 64-bit (true) timespec. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        /* symlink(target=arg1, linkpath=arg2). */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            /* unlock_user is a no-op on a NULL pointer, so these are safe
             * even on the EFAULT path. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        /* symlinkat(target=arg1, newdirfd=arg2, linkpath=arg3). */
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        /*
         * readlink(pathname=arg1, buf=arg2, bufsiz=arg3).
         * do_guest_readlink() also handles the /proc/self/exe special case.
         */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                /*
                 * Bad guest address for the path or the buffer.  Matches
                 * the check in TARGET_NR_readlinkat below; without it
                 * do_guest_readlink() would be handed a NULL pointer.
                 */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_guest_readlink(p, p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /* readlinkat(dirfd=arg1, pathname=arg2, buf=arg3, bufsiz=arg4). */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* swapon(path=arg1, swapflags=arg2). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* reboot(magic=arg1, magic2=arg2, cmd=arg3, arg=arg4). */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
        /*
         * Old-style mmap: arg1 points to a block of six abi_ulong
         * arguments in guest memory (addr, len, prot, flags, fd, offset).
         */
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            /* Each slot is byte-swapped from guest to host order. */
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            return do_mmap(v1, v2, v3, v4, v5, v6);
        }
#else
        /* mmap pointers are always untagged */
        return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 passes the offset in units of 2^MMAP_SHIFT (usually 4K pages);
         * widen before shifting so large offsets don't overflow. */
        return do_mmap(arg1, arg2, arg3, arg4, arg5,
                       (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
#endif
    case TARGET_NR_munmap:
        /* Strip any pointer tag bits before doing address arithmetic. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = get_task_state(cpu);
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Expand the request to cover the whole stack region and
                 * drop the flag the host may not understand here. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() translates the guest address to the host mapping. */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* MCL_* flag bits need translating from target to host values. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        /* truncate(path=arg1, length=arg2). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* fchmodat(dirfd=arg1, path=arg2, mode=arg3); flags forced to 0. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_fchmodat2) && defined(__NR_fchmodat2)
    case TARGET_NR_fchmodat2:
        /* fchmodat2 additionally passes the flags argument (arg4) through. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_fchmodat2(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        /* setpriority(which=arg1, who=arg2, prio=arg3). */
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        /* statfs(path=arg1, buf=arg2); result conversion is shared with
         * fstatfs via the convert_statfs label below. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Copy the host statfs (in the outer-scope 'stfs') field by field
         * into the guest's target_statfs layout at arg2. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        /* fstatfs(fd=arg1, buf=arg2): reuse statfs's conversion code. */
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        /* 64-bit variant: the guest buffer pointer is arg3 here (arg2 is
         * the structure size on this ABI). */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point (older ABIs): arg1 selects the
         * operation, arg2 points to its argument block. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with flags = 0. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        /* No pointer arguments, so call the host directly. */
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag: 0 = receive, 1 = send. */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send() is sendto() with a NULL destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* getrandom(buf=arg1, buflen=arg2, flags=arg3). */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        /* On success ret is the number of bytes actually written back. */
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        /*
         * syslog(type=arg1, bufp=arg2, len=arg3).
         * Only the READ* actions use the buffer; the rest pass NULL.
         */
        {
            /*
             * The length is arg3 (arg2 is the buffer pointer); it is the
             * value passed to lock_user() and sys_syslog() below, so the
             * range checks must be applied to arg3 as well.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        /* setitimer(which=arg1, new_value=arg2, old_value=arg3). */
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                /* The guest itimerval is two consecutive target_timevals. */
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Copy the previous timer value back if requested. */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        /* getitimer(which=arg1, curr_value=arg2). */
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        /* stat/lstat/fstat all share the conversion code at do_stat: the
         * host result lands in the outer-scope 'st' and the guest buffer
         * pointer is arg2 in every variant. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Convert the host stat to the guest's target_stat layout. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding and unset fields are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when both host and target have them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: arg1 is the nested syscall number, the
         * remaining arguments shift down by one. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        /* wait4(pid=arg1, status=arg2, options=arg3, rusage=arg4). */
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Only write status back if a child was actually reaped
                 * (ret != 0) and the guest asked for it. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        /* swapoff(path=arg1). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        /* sysinfo(info=arg1): field-by-field copy into the guest layout. */
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point (older ABIs). */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() with no timeout. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        /* Final flag selects 32-bit (false) vs 64-bit (true) timespec. */
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* shmat needs the CPU env to manage the guest address space. */
        return target_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return target_shmdt(arg1);
#endif
11178     case TARGET_NR_fsync:
11179         return get_errno(fsync(arg1));
11180     case TARGET_NR_clone:
11181         /* Linux manages to have three different orderings for its
11182          * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
11183          * match the kernel's CONFIG_CLONE_* settings.
11184          * Microblaze is further special in that it uses a sixth
11185          * implicit argument to clone for the TLS pointer.
11186          */
11187 #if defined(TARGET_MICROBLAZE)
11188         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
11189 #elif defined(TARGET_CLONE_BACKWARDS)
11190         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
11191 #elif defined(TARGET_CLONE_BACKWARDS2)
11192         ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
11193 #else
11194         ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
11195 #endif
11196         return ret;
11197 #ifdef __NR_exit_group
11198         /* new thread calls */
11199     case TARGET_NR_exit_group:
11200         preexit_cleanup(cpu_env, arg1);
11201         return get_errno(exit_group(arg1));
11202 #endif
11203     case TARGET_NR_setdomainname:
11204         if (!(p = lock_user_string(arg1)))
11205             return -TARGET_EFAULT;
11206         ret = get_errno(setdomainname(p, arg2));
11207         unlock_user(p, arg1, 0);
11208         return ret;
11209     case TARGET_NR_uname:
11210         /* no need to transcode because we use the linux syscall */
11211         {
11212             struct new_utsname * buf;
11213 
11214             if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
11215                 return -TARGET_EFAULT;
11216             ret = get_errno(sys_uname(buf));
11217             if (!is_error(ret)) {
11218                 /* Overwrite the native machine name with whatever is being
11219                    emulated. */
11220                 g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
11221                           sizeof(buf->machine));
11222                 /* Allow the user to override the reported release.  */
11223                 if (qemu_uname_release && *qemu_uname_release) {
11224                     g_strlcpy(buf->release, qemu_uname_release,
11225                               sizeof(buf->release));
11226                 }
11227             }
11228             unlock_user_struct(buf, arg1, 1);
11229         }
11230         return ret;
11231 #ifdef TARGET_I386
11232     case TARGET_NR_modify_ldt:
11233         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11234 #if !defined(TARGET_X86_64)
11235     case TARGET_NR_vm86:
11236         return do_vm86(cpu_env, arg1, arg2);
11237 #endif
11238 #endif
11239 #if defined(TARGET_NR_adjtimex)
11240     case TARGET_NR_adjtimex:
11241         {
11242             struct timex host_buf;
11243 
11244             if (target_to_host_timex(&host_buf, arg1) != 0) {
11245                 return -TARGET_EFAULT;
11246             }
11247             ret = get_errno(adjtimex(&host_buf));
11248             if (!is_error(ret)) {
11249                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11250                     return -TARGET_EFAULT;
11251                 }
11252             }
11253         }
11254         return ret;
11255 #endif
11256 #if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
11257     case TARGET_NR_clock_adjtime:
11258         {
11259             struct timex htx;
11260 
11261             if (target_to_host_timex(&htx, arg2) != 0) {
11262                 return -TARGET_EFAULT;
11263             }
11264             ret = get_errno(clock_adjtime(arg1, &htx));
11265             if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
11266                 return -TARGET_EFAULT;
11267             }
11268         }
11269         return ret;
11270 #endif
11271 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
11272     case TARGET_NR_clock_adjtime64:
11273         {
11274             struct timex htx;
11275 
11276             if (target_to_host_timex64(&htx, arg2) != 0) {
11277                 return -TARGET_EFAULT;
11278             }
11279             ret = get_errno(clock_adjtime(arg1, &htx));
11280             if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
11281                     return -TARGET_EFAULT;
11282             }
11283         }
11284         return ret;
11285 #endif
11286     case TARGET_NR_getpgid:
11287         return get_errno(getpgid(arg1));
11288     case TARGET_NR_fchdir:
11289         return get_errno(fchdir(arg1));
11290     case TARGET_NR_personality:
11291         return get_errno(personality(arg1));
11292 #ifdef TARGET_NR__llseek /* Not on alpha */
11293     case TARGET_NR__llseek:
11294         {
11295             int64_t res;
11296 #if !defined(__NR_llseek)
11297             res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
11298             if (res == -1) {
11299                 ret = get_errno(res);
11300             } else {
11301                 ret = 0;
11302             }
11303 #else
11304             ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
11305 #endif
11306             if ((ret == 0) && put_user_s64(res, arg4)) {
11307                 return -TARGET_EFAULT;
11308             }
11309         }
11310         return ret;
11311 #endif
11312 #ifdef TARGET_NR_getdents
11313     case TARGET_NR_getdents:
11314         return do_getdents(arg1, arg2, arg3);
11315 #endif /* TARGET_NR_getdents */
11316 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
11317     case TARGET_NR_getdents64:
11318         return do_getdents64(arg1, arg2, arg3);
11319 #endif /* TARGET_NR_getdents64 */
11320 #if defined(TARGET_NR__newselect)
11321     case TARGET_NR__newselect:
11322         return do_select(arg1, arg2, arg3, arg4, arg5);
11323 #endif
11324 #ifdef TARGET_NR_poll
11325     case TARGET_NR_poll:
11326         return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
11327 #endif
11328 #ifdef TARGET_NR_ppoll
11329     case TARGET_NR_ppoll:
11330         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
11331 #endif
11332 #ifdef TARGET_NR_ppoll_time64
11333     case TARGET_NR_ppoll_time64:
11334         return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
11335 #endif
11336     case TARGET_NR_flock:
11337         /* NOTE: the flock constant seems to be the same for every
11338            Linux platform */
11339         return get_errno(safe_flock(arg1, arg2));
11340     case TARGET_NR_readv:
11341         {
11342             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11343             if (vec != NULL) {
11344                 ret = get_errno(safe_readv(arg1, vec, arg3));
11345                 unlock_iovec(vec, arg2, arg3, 1);
11346             } else {
11347                 ret = -host_to_target_errno(errno);
11348             }
11349         }
11350         return ret;
11351     case TARGET_NR_writev:
11352         {
11353             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11354             if (vec != NULL) {
11355                 ret = get_errno(safe_writev(arg1, vec, arg3));
11356                 unlock_iovec(vec, arg2, arg3, 0);
11357             } else {
11358                 ret = -host_to_target_errno(errno);
11359             }
11360         }
11361         return ret;
11362 #if defined(TARGET_NR_preadv)
11363     case TARGET_NR_preadv:
11364         {
11365             struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
11366             if (vec != NULL) {
11367                 unsigned long low, high;
11368 
11369                 target_to_host_low_high(arg4, arg5, &low, &high);
11370                 ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
11371                 unlock_iovec(vec, arg2, arg3, 1);
11372             } else {
11373                 ret = -host_to_target_errno(errno);
11374            }
11375         }
11376         return ret;
11377 #endif
11378 #if defined(TARGET_NR_pwritev)
11379     case TARGET_NR_pwritev:
11380         {
11381             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
11382             if (vec != NULL) {
11383                 unsigned long low, high;
11384 
11385                 target_to_host_low_high(arg4, arg5, &low, &high);
11386                 ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
11387                 unlock_iovec(vec, arg2, arg3, 0);
11388             } else {
11389                 ret = -host_to_target_errno(errno);
11390            }
11391         }
11392         return ret;
11393 #endif
11394     case TARGET_NR_getsid:
11395         return get_errno(getsid(arg1));
11396 #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
11397     case TARGET_NR_fdatasync:
11398         return get_errno(fdatasync(arg1));
11399 #endif
11400     case TARGET_NR_sched_getaffinity:
11401         {
11402             unsigned int mask_size;
11403             unsigned long *mask;
11404 
11405             /*
11406              * sched_getaffinity needs multiples of ulong, so need to take
11407              * care of mismatches between target ulong and host ulong sizes.
11408              */
11409             if (arg2 & (sizeof(abi_ulong) - 1)) {
11410                 return -TARGET_EINVAL;
11411             }
11412             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11413 
11414             mask = alloca(mask_size);
11415             memset(mask, 0, mask_size);
11416             ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
11417 
11418             if (!is_error(ret)) {
11419                 if (ret > arg2) {
11420                     /* More data returned than the caller's buffer will fit.
11421                      * This only happens if sizeof(abi_long) < sizeof(long)
11422                      * and the caller passed us a buffer holding an odd number
11423                      * of abi_longs. If the host kernel is actually using the
11424                      * extra 4 bytes then fail EINVAL; otherwise we can just
11425                      * ignore them and only copy the interesting part.
11426                      */
11427                     int numcpus = sysconf(_SC_NPROCESSORS_CONF);
11428                     if (numcpus > arg2 * 8) {
11429                         return -TARGET_EINVAL;
11430                     }
11431                     ret = arg2;
11432                 }
11433 
11434                 if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
11435                     return -TARGET_EFAULT;
11436                 }
11437             }
11438         }
11439         return ret;
11440     case TARGET_NR_sched_setaffinity:
11441         {
11442             unsigned int mask_size;
11443             unsigned long *mask;
11444 
11445             /*
11446              * sched_setaffinity needs multiples of ulong, so need to take
11447              * care of mismatches between target ulong and host ulong sizes.
11448              */
11449             if (arg2 & (sizeof(abi_ulong) - 1)) {
11450                 return -TARGET_EINVAL;
11451             }
11452             mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
11453             mask = alloca(mask_size);
11454 
11455             ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
11456             if (ret) {
11457                 return ret;
11458             }
11459 
11460             return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
11461         }
11462     case TARGET_NR_getcpu:
11463         {
11464             unsigned cpuid, node;
11465             ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
11466                                        arg2 ? &node : NULL,
11467                                        NULL));
11468             if (is_error(ret)) {
11469                 return ret;
11470             }
11471             if (arg1 && put_user_u32(cpuid, arg1)) {
11472                 return -TARGET_EFAULT;
11473             }
11474             if (arg2 && put_user_u32(node, arg2)) {
11475                 return -TARGET_EFAULT;
11476             }
11477         }
11478         return ret;
11479     case TARGET_NR_sched_setparam:
11480         {
11481             struct target_sched_param *target_schp;
11482             struct sched_param schp;
11483 
11484             if (arg2 == 0) {
11485                 return -TARGET_EINVAL;
11486             }
11487             if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
11488                 return -TARGET_EFAULT;
11489             }
11490             schp.sched_priority = tswap32(target_schp->sched_priority);
11491             unlock_user_struct(target_schp, arg2, 0);
11492             return get_errno(sys_sched_setparam(arg1, &schp));
11493         }
11494     case TARGET_NR_sched_getparam:
11495         {
11496             struct target_sched_param *target_schp;
11497             struct sched_param schp;
11498 
11499             if (arg2 == 0) {
11500                 return -TARGET_EINVAL;
11501             }
11502             ret = get_errno(sys_sched_getparam(arg1, &schp));
11503             if (!is_error(ret)) {
11504                 if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
11505                     return -TARGET_EFAULT;
11506                 }
11507                 target_schp->sched_priority = tswap32(schp.sched_priority);
11508                 unlock_user_struct(target_schp, arg2, 1);
11509             }
11510         }
11511         return ret;
11512     case TARGET_NR_sched_setscheduler:
11513         {
11514             struct target_sched_param *target_schp;
11515             struct sched_param schp;
11516             if (arg3 == 0) {
11517                 return -TARGET_EINVAL;
11518             }
11519             if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
11520                 return -TARGET_EFAULT;
11521             }
11522             schp.sched_priority = tswap32(target_schp->sched_priority);
11523             unlock_user_struct(target_schp, arg3, 0);
11524             return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
11525         }
11526     case TARGET_NR_sched_getscheduler:
11527         return get_errno(sys_sched_getscheduler(arg1));
11528     case TARGET_NR_sched_getattr:
11529         {
11530             struct target_sched_attr *target_scha;
11531             struct sched_attr scha;
11532             if (arg2 == 0) {
11533                 return -TARGET_EINVAL;
11534             }
11535             if (arg3 > sizeof(scha)) {
11536                 arg3 = sizeof(scha);
11537             }
11538             ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
11539             if (!is_error(ret)) {
11540                 target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11541                 if (!target_scha) {
11542                     return -TARGET_EFAULT;
11543                 }
11544                 target_scha->size = tswap32(scha.size);
11545                 target_scha->sched_policy = tswap32(scha.sched_policy);
11546                 target_scha->sched_flags = tswap64(scha.sched_flags);
11547                 target_scha->sched_nice = tswap32(scha.sched_nice);
11548                 target_scha->sched_priority = tswap32(scha.sched_priority);
11549                 target_scha->sched_runtime = tswap64(scha.sched_runtime);
11550                 target_scha->sched_deadline = tswap64(scha.sched_deadline);
11551                 target_scha->sched_period = tswap64(scha.sched_period);
11552                 if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
11553                     target_scha->sched_util_min = tswap32(scha.sched_util_min);
11554                     target_scha->sched_util_max = tswap32(scha.sched_util_max);
11555                 }
11556                 unlock_user(target_scha, arg2, arg3);
11557             }
11558             return ret;
11559         }
11560     case TARGET_NR_sched_setattr:
11561         {
11562             struct target_sched_attr *target_scha;
11563             struct sched_attr scha;
11564             uint32_t size;
11565             int zeroed;
11566             if (arg2 == 0) {
11567                 return -TARGET_EINVAL;
11568             }
11569             if (get_user_u32(size, arg2)) {
11570                 return -TARGET_EFAULT;
11571             }
11572             if (!size) {
11573                 size = offsetof(struct target_sched_attr, sched_util_min);
11574             }
11575             if (size < offsetof(struct target_sched_attr, sched_util_min)) {
11576                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11577                     return -TARGET_EFAULT;
11578                 }
11579                 return -TARGET_E2BIG;
11580             }
11581 
11582             zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
11583             if (zeroed < 0) {
11584                 return zeroed;
11585             } else if (zeroed == 0) {
11586                 if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
11587                     return -TARGET_EFAULT;
11588                 }
11589                 return -TARGET_E2BIG;
11590             }
11591             if (size > sizeof(struct target_sched_attr)) {
11592                 size = sizeof(struct target_sched_attr);
11593             }
11594 
11595             target_scha = lock_user(VERIFY_READ, arg2, size, 1);
11596             if (!target_scha) {
11597                 return -TARGET_EFAULT;
11598             }
11599             scha.size = size;
11600             scha.sched_policy = tswap32(target_scha->sched_policy);
11601             scha.sched_flags = tswap64(target_scha->sched_flags);
11602             scha.sched_nice = tswap32(target_scha->sched_nice);
11603             scha.sched_priority = tswap32(target_scha->sched_priority);
11604             scha.sched_runtime = tswap64(target_scha->sched_runtime);
11605             scha.sched_deadline = tswap64(target_scha->sched_deadline);
11606             scha.sched_period = tswap64(target_scha->sched_period);
11607             if (size > offsetof(struct target_sched_attr, sched_util_min)) {
11608                 scha.sched_util_min = tswap32(target_scha->sched_util_min);
11609                 scha.sched_util_max = tswap32(target_scha->sched_util_max);
11610             }
11611             unlock_user(target_scha, arg2, 0);
11612             return get_errno(sys_sched_setattr(arg1, &scha, arg3));
11613         }
11614     case TARGET_NR_sched_yield:
11615         return get_errno(sched_yield());
11616     case TARGET_NR_sched_get_priority_max:
11617         return get_errno(sched_get_priority_max(arg1));
11618     case TARGET_NR_sched_get_priority_min:
11619         return get_errno(sched_get_priority_min(arg1));
11620 #ifdef TARGET_NR_sched_rr_get_interval
11621     case TARGET_NR_sched_rr_get_interval:
11622         {
11623             struct timespec ts;
11624             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11625             if (!is_error(ret)) {
11626                 ret = host_to_target_timespec(arg2, &ts);
11627             }
11628         }
11629         return ret;
11630 #endif
11631 #ifdef TARGET_NR_sched_rr_get_interval_time64
11632     case TARGET_NR_sched_rr_get_interval_time64:
11633         {
11634             struct timespec ts;
11635             ret = get_errno(sched_rr_get_interval(arg1, &ts));
11636             if (!is_error(ret)) {
11637                 ret = host_to_target_timespec64(arg2, &ts);
11638             }
11639         }
11640         return ret;
11641 #endif
11642 #if defined(TARGET_NR_nanosleep)
11643     case TARGET_NR_nanosleep:
11644         {
11645             struct timespec req, rem;
11646             if (target_to_host_timespec(&req, arg1)) {
11647                 return -TARGET_EFAULT;
11648             }
11649             ret = get_errno(safe_nanosleep(&req, &rem));
11650             if (is_error(ret) && arg2) {
11651                 if (host_to_target_timespec(arg2, &rem)) {
11652                     return -TARGET_EFAULT;
11653                 }
11654             }
11655         }
11656         return ret;
11657 #endif
11658     case TARGET_NR_prctl:
11659         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11660         break;
11661 #ifdef TARGET_NR_arch_prctl
11662     case TARGET_NR_arch_prctl:
11663         return do_arch_prctl(cpu_env, arg1, arg2);
11664 #endif
11665 #ifdef TARGET_NR_pread64
11666     case TARGET_NR_pread64:
11667         if (regpairs_aligned(cpu_env, num)) {
11668             arg4 = arg5;
11669             arg5 = arg6;
11670         }
11671         if (arg2 == 0 && arg3 == 0) {
11672             /* Special-case NULL buffer and zero length, which should succeed */
11673             p = 0;
11674         } else {
11675             p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
11676             if (!p) {
11677                 return -TARGET_EFAULT;
11678             }
11679         }
11680         ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
11681         unlock_user(p, arg2, ret);
11682         return ret;
11683     case TARGET_NR_pwrite64:
11684         if (regpairs_aligned(cpu_env, num)) {
11685             arg4 = arg5;
11686             arg5 = arg6;
11687         }
11688         if (arg2 == 0 && arg3 == 0) {
11689             /* Special-case NULL buffer and zero length, which should succeed */
11690             p = 0;
11691         } else {
11692             p = lock_user(VERIFY_READ, arg2, arg3, 1);
11693             if (!p) {
11694                 return -TARGET_EFAULT;
11695             }
11696         }
11697         ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
11698         unlock_user(p, arg2, 0);
11699         return ret;
11700 #endif
11701     case TARGET_NR_getcwd:
11702         if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
11703             return -TARGET_EFAULT;
11704         ret = get_errno(sys_getcwd1(p, arg2));
11705         unlock_user(p, arg1, ret);
11706         return ret;
11707     case TARGET_NR_capget:
11708     case TARGET_NR_capset:
11709     {
11710         struct target_user_cap_header *target_header;
11711         struct target_user_cap_data *target_data = NULL;
11712         struct __user_cap_header_struct header;
11713         struct __user_cap_data_struct data[2];
11714         struct __user_cap_data_struct *dataptr = NULL;
11715         int i, target_datalen;
11716         int data_items = 1;
11717 
11718         if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
11719             return -TARGET_EFAULT;
11720         }
11721         header.version = tswap32(target_header->version);
11722         header.pid = tswap32(target_header->pid);
11723 
11724         if (header.version != _LINUX_CAPABILITY_VERSION) {
11725             /* Version 2 and up takes pointer to two user_data structs */
11726             data_items = 2;
11727         }
11728 
11729         target_datalen = sizeof(*target_data) * data_items;
11730 
11731         if (arg2) {
11732             if (num == TARGET_NR_capget) {
11733                 target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
11734             } else {
11735                 target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
11736             }
11737             if (!target_data) {
11738                 unlock_user_struct(target_header, arg1, 0);
11739                 return -TARGET_EFAULT;
11740             }
11741 
11742             if (num == TARGET_NR_capset) {
11743                 for (i = 0; i < data_items; i++) {
11744                     data[i].effective = tswap32(target_data[i].effective);
11745                     data[i].permitted = tswap32(target_data[i].permitted);
11746                     data[i].inheritable = tswap32(target_data[i].inheritable);
11747                 }
11748             }
11749 
11750             dataptr = data;
11751         }
11752 
11753         if (num == TARGET_NR_capget) {
11754             ret = get_errno(capget(&header, dataptr));
11755         } else {
11756             ret = get_errno(capset(&header, dataptr));
11757         }
11758 
11759         /* The kernel always updates version for both capget and capset */
11760         target_header->version = tswap32(header.version);
11761         unlock_user_struct(target_header, arg1, 1);
11762 
11763         if (arg2) {
11764             if (num == TARGET_NR_capget) {
11765                 for (i = 0; i < data_items; i++) {
11766                     target_data[i].effective = tswap32(data[i].effective);
11767                     target_data[i].permitted = tswap32(data[i].permitted);
11768                     target_data[i].inheritable = tswap32(data[i].inheritable);
11769                 }
11770                 unlock_user(target_data, arg2, target_datalen);
11771             } else {
11772                 unlock_user(target_data, arg2, 0);
11773             }
11774         }
11775         return ret;
11776     }
11777     case TARGET_NR_sigaltstack:
11778         return do_sigaltstack(arg1, arg2, cpu_env);
11779 
11780 #ifdef CONFIG_SENDFILE
11781 #ifdef TARGET_NR_sendfile
11782     case TARGET_NR_sendfile:
11783     {
11784         off_t *offp = NULL;
11785         off_t off;
11786         if (arg3) {
11787             ret = get_user_sal(off, arg3);
11788             if (is_error(ret)) {
11789                 return ret;
11790             }
11791             offp = &off;
11792         }
11793         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11794         if (!is_error(ret) && arg3) {
11795             abi_long ret2 = put_user_sal(off, arg3);
11796             if (is_error(ret2)) {
11797                 ret = ret2;
11798             }
11799         }
11800         return ret;
11801     }
11802 #endif
11803 #ifdef TARGET_NR_sendfile64
11804     case TARGET_NR_sendfile64:
11805     {
11806         off_t *offp = NULL;
11807         off_t off;
11808         if (arg3) {
11809             ret = get_user_s64(off, arg3);
11810             if (is_error(ret)) {
11811                 return ret;
11812             }
11813             offp = &off;
11814         }
11815         ret = get_errno(sendfile(arg1, arg2, offp, arg4));
11816         if (!is_error(ret) && arg3) {
11817             abi_long ret2 = put_user_s64(off, arg3);
11818             if (is_error(ret2)) {
11819                 ret = ret2;
11820             }
11821         }
11822         return ret;
11823     }
11824 #endif
11825 #endif
11826 #ifdef TARGET_NR_vfork
11827     case TARGET_NR_vfork:
11828         return get_errno(do_fork(cpu_env,
11829                          CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
11830                          0, 0, 0, 0));
11831 #endif
11832 #ifdef TARGET_NR_ugetrlimit
11833     case TARGET_NR_ugetrlimit:
11834     {
11835 	struct rlimit rlim;
11836 	int resource = target_to_host_resource(arg1);
11837 	ret = get_errno(getrlimit(resource, &rlim));
11838 	if (!is_error(ret)) {
11839 	    struct target_rlimit *target_rlim;
11840             if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
11841                 return -TARGET_EFAULT;
11842 	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
11843 	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
11844             unlock_user_struct(target_rlim, arg2, 1);
11845 	}
11846         return ret;
11847     }
11848 #endif
11849 #ifdef TARGET_NR_truncate64
11850     case TARGET_NR_truncate64:
11851         if (!(p = lock_user_string(arg1)))
11852             return -TARGET_EFAULT;
11853 	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
11854         unlock_user(p, arg1, 0);
11855         return ret;
11856 #endif
11857 #ifdef TARGET_NR_ftruncate64
11858     case TARGET_NR_ftruncate64:
11859         return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
11860 #endif
11861 #ifdef TARGET_NR_stat64
11862     case TARGET_NR_stat64:
11863         if (!(p = lock_user_string(arg1))) {
11864             return -TARGET_EFAULT;
11865         }
11866         ret = get_errno(stat(path(p), &st));
11867         unlock_user(p, arg1, 0);
11868         if (!is_error(ret))
11869             ret = host_to_target_stat64(cpu_env, arg2, &st);
11870         return ret;
11871 #endif
11872 #ifdef TARGET_NR_lstat64
11873     case TARGET_NR_lstat64:
11874         if (!(p = lock_user_string(arg1))) {
11875             return -TARGET_EFAULT;
11876         }
11877         ret = get_errno(lstat(path(p), &st));
11878         unlock_user(p, arg1, 0);
11879         if (!is_error(ret))
11880             ret = host_to_target_stat64(cpu_env, arg2, &st);
11881         return ret;
11882 #endif
11883 #ifdef TARGET_NR_fstat64
11884     case TARGET_NR_fstat64:
11885         ret = get_errno(fstat(arg1, &st));
11886         if (!is_error(ret))
11887             ret = host_to_target_stat64(cpu_env, arg2, &st);
11888         return ret;
11889 #endif
11890 #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
11891 #ifdef TARGET_NR_fstatat64
11892     case TARGET_NR_fstatat64:
11893 #endif
11894 #ifdef TARGET_NR_newfstatat
11895     case TARGET_NR_newfstatat:
11896 #endif
11897         if (!(p = lock_user_string(arg2))) {
11898             return -TARGET_EFAULT;
11899         }
11900         ret = get_errno(fstatat(arg1, path(p), &st, arg4));
11901         unlock_user(p, arg2, 0);
11902         if (!is_error(ret))
11903             ret = host_to_target_stat64(cpu_env, arg3, &st);
11904         return ret;
11905 #endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            /* Preferred path: forward to the host statx syscall when the
             * host kernel provides it, and fall through to the fstatat
             * emulation below only if the host returns ENOSYS. */
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: emulate statx via fstatat and hand-fill the
             * target_statx fields we can derive from struct stat.
             * Note the mask (arg4) cannot be honoured here; unknown
             * fields are simply left zeroed by the memset below. */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                /* Only the seconds fields are available from struct stat;
                 * tv_nsec stays zero from the memset. */
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* chown that does not follow symlinks.  The low2high/high2low
         * helpers translate between the target's (possibly 16-bit)
         * uid/gid representation and the host's. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 is a valid query for the group count; in
             * that case getgroups() is called with a NULL list. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Convert each target id (byte-swapped, possibly 16-bit)
                 * into a host gid_t before handing off to the kernel. */
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            /* gidsetsize == 0 clears the supplementary group list. */
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* chown relative to dirfd (arg1); arg5 carries the AT_* flags. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Write the real, effective and saved uids back to the
                 * three guest pointers. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /* Set real, effective and saved gids.  This arm must be guarded
         * by its own syscall macro (TARGET_NR_setresgid), matching the
         * setresuid arm above: guarding it with TARGET_NR_getresgid
         * would fail to compile on targets that define getresgid but
         * not setresgid, and would silently drop the syscall (ENOSYS)
         * on targets that define setresgid but not getresgid. */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Store real, effective and saved gids to the three
                 * guest pointers. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    /* setfsuid/setfsgid never fail in the error sense: the kernel just
     * returns the previous fsuid/fsgid, so ids are passed through. */
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
12121 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit-id variant: ids are passed through without the
         * low2high/high2low 16-bit translation. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            /* Alpha returns a second value (the effective uid) in a4. */
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            /* NOTE(review): declared uid_t but holds a gid value; uid_t
             * and gid_t are the same width on Linux hosts, so this is
             * harmless, but gid_t would be the accurate type. */
            uid_t egid;
            egid=getegid();
            /* Effective gid is returned in a4 alongside the real gid. */
            cpu_env->ir[IR_A4]=egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Only the IEEE FP control query is implemented; everything
         * else falls through to EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /* Refresh the status bits in swcr from the live FPCR
                 * (the authoritative store for status; see the matching
                 * comment in osf_setsysinfo below). */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Rebuild the FPCR: keep only the dynamic rounding mode
                 * field, then merge in the control bits mapped from the
                 * software control word. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Pick a si_code for the highest-priority newly
                     * raised, trap-enabled exception; later checks
                     * override earlier ones. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Deliver SIGFPE to the guest at its current pc. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the target SIG_* "how" constant to the host's. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* Unlike sigprocmask(2), the mask is passed by value in arg2
             * and the old mask is returned as the syscall result. */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
12309 
#ifdef TARGET_NR_getgid32
    /* 32-bit-id syscall variants: no 16-bit id translation needed. */
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(sys_setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(sys_setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 queries the group count (NULL list). */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap each 32-bit gid into host order. */
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            /* gidsetsize == 0 clears the supplementary group list. */
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    /* 32-bit-id variants of the ownership syscalls: ids pass through
     * unchanged, no 16-bit translation. */
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Write real/effective/saved uids to the guest pointers. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* arg1/arg2 describe the guest address range to query;
             * arg3 is the guest vector receiving one byte per page. */
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error directly (not via errno). */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate args into the usual fd, offset, len, advice order;
         * ret is used as a scratch variable here. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different numeric values for the POSIX_FADV_*
         * advice constants; remap them to host values. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        int cmd;
        struct flock fl;
        /* Conversion helpers between the target's flock64 layout and
         * the host struct flock. */
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* ARM OABI lays out struct flock64 differently from EABI. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
	    break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
	    break;
        default:
            /* All non-lock commands share the plain fcntl handler. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* On 32-bit ABIs the 64-bit offset is split across a register
         * pair, possibly with alignment padding shifting the args. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /* Extended-attribute syscall family.  Throughout: a size/value
     * pointer of 0 is a valid "query the required size" call, hence
     * the `if (argN)` guards before locking guest buffers. */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            /* num distinguishes the follow-symlink variant from the
             * l-variant sharing this arm. */
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            /* p = path, n = attribute name. */
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS keeps the user-local (TLS) value in the CP0 UserLocal
       * register of the active thread context. */
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      /* 32-bit x86 thread areas are managed out of line (descriptor
       * table handling). */
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k: record the thread pointer in the per-thread TaskState. */
          TaskState *ts = get_task_state(cpu);
          ts->tp_value = arg1;
          return 0;
      }
#else
      /* No per-arch implementation: report unsupported. */
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        /* 32-bit x86: read back the thread-area descriptor out of line. */
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            /* m68k: the value previously stored by set_thread_area. */
            TaskState *ts = get_task_state(cpu);
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated; the guest C library can presumably fall back to
         * another source such as uname() — intentionally ENOSYS. */
        return -TARGET_ENOSYS;
#endif
12821 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec, then set the host clock directly. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* Same as clock_settime but the guest struct carries a 64-bit
         * time_t (time64 ABI variant). */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        /* Query the host clock, then copy the result out to the guest;
         * a faulting arg2 turns into EFAULT via the conversion helper. */
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        /* time64 variant: guest timespec carries a 64-bit time_t. */
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        /* clock_getres(clockid, res): the res pointer may be NULL, in
         * which case the kernel only validates the clock id.  Previously
         * a failing copy-out was silently ignored; now a bad non-NULL
         * pointer reports EFAULT like the kernel does. */
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        /* time64 variant of clock_getres; same NULL-res and EFAULT
         * handling as above. */
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret) && arg2 &&
            host_to_target_timespec64(arg2, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        /* clock_nanosleep(clockid, flags, request, remain).  The single
         * host timespec 'ts' doubles as the request and, on interruption,
         * the remaining-time output. */
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* time64 variant: same logic with 64-bit time_t guest structs. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        /* On EINTR with a relative sleep, copy the unslept remainder out. */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12931 
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = get_task_state(cpu);
        /* Record the guest address — presumably consulted at thread exit
         * for CLONE_CHILD_CLEARTID-style wakeups; confirm against the
         * thread-exit path. */
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif
12941 
    case TARGET_NR_tkill:
        /* Guest and host signal numbers can differ; translate before
         * delivering to the host thread id. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        /* Thread-group directed kill: (tgid, tid, sig). */
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12966 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* utimensat(dirfd, pathname, times[2], flags).  A NULL times
             * pointer means "set both timestamps to the current time";
             * otherwise the two guest timespecs are read back to back. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A NULL pathname is valid: it operates on dirfd itself. */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* time64 variant: guest timespecs carry a 64-bit time_t. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* The bool selects 32-bit vs 64-bit time_t timeout handling. */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
13032 #ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            /* Attach the fd translator so inotify events read from this
             * fd are converted to the guest's layout. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* Flags (e.g. NONBLOCK/CLOEXEC) are remapped to host values. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* inotify_add_watch(fd, pathname, mask).  The pathname lock was
         * previously unchecked, letting a faulting guest pointer reach
         * path() as NULL; report EFAULT instead, as the sibling
         * path-based cases do. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Both arguments are plain integers (fd, watch descriptor). */
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
13061 #endif
13062 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* mq_open(name, oflag, mode, attr): open/create a POSIX
             * message queue.  The optional attr struct is converted from
             * the guest layout before the host call. */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the name is locked at arg1 - 1 but unlocked
             * at arg1 — this off-by-one looks suspicious; confirm against
             * lock_user_string() semantics and the guest ABI before
             * touching it. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;
13086 
    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 lock / arg1 unlock asymmetry as
         * mq_open above — verify before changing. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
13095 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /* mq_timedsend(mqdes, msg_ptr, msg_len, prio, abs_timeout).
             * Fixes over the previous version: the message-buffer lock is
             * now checked (EFAULT on a bad guest pointer instead of
             * handing NULL to the host), and the lock is released on the
             * early EFAULT returns instead of leaking. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* time64 variant of mq_timedsend.  Same fixes as the 32-bit
             * case: checked buffer lock and no lock leak on the early
             * EFAULT returns. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
13138 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /* mq_timedreceive(mqdes, msg_ptr, msg_len, prio, abs_timeout).
             * Fixes: the receive buffer is written by the host so it is
             * locked VERIFY_WRITE (it was VERIFY_READ with a pointless
             * copy-in); the lock result is checked; the lock is released
             * on early EFAULT returns; and 'prio' is only written back on
             * success (it is uninitialized on failure), with the
             * put_user result checked. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            /* Copy back only the bytes the host actually produced. */
            unlock_user(p, arg2, is_error(ret) ? 0 : ret);
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* time64 variant of mq_timedreceive; same fixes as the
             * 32-bit case (VERIFY_WRITE buffer lock, checked lock, no
             * lock leak, prio written back only on success). */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, is_error(ret) ? 0 : ret);
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13192 
13193     /* Not implemented for now... */
13194 /*     case TARGET_NR_mq_notify: */
13195 /*         break; */
13196 
13197     case TARGET_NR_mq_getsetattr:
13198         {
13199             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13200             ret = 0;
13201             if (arg2 != 0) {
13202                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13203                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13204                                            &posix_mq_attr_out));
13205             } else if (arg3 != 0) {
13206                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13207             }
13208             if (ret == 0 && arg3 != 0) {
13209                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13210             }
13211         }
13212         return ret;
13213 #endif
13214 
13215 #ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        /* tee(fd_in, fd_out, len, flags): all-integer arguments, so the
         * call can be forwarded to the host directly. */
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* splice(fd_in, off_in, fd_out, off_out, len, flags).  The two
             * optional 64-bit offsets are copied in, passed by pointer so
             * the host can advance them, then copied back out. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Write the (possibly advanced) offsets back to the guest. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* vmsplice(fd, iov, nr_segs, flags): map the guest iovec
             * array into host memory first.  On failure lock_iovec()
             * leaves the cause in errno, hence the direct translation
             * below rather than a fixed EFAULT. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
13267 #endif /* CONFIG_SPLICE */
13268 #ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Legacy eventfd: no flags argument. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register the translator that byte-swaps the 8-byte
             * counter on reads/writes of this fd. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* eventfd2: translate only the NONBLOCK/CLOEXEC flag bits the
         * guest may set; any other bits are passed through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
13294 #endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* 32-bit ABIs pass the two 64-bit values (offset, len) split
         * across register pairs; reassemble them here. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* 32-bit MIPS inserts an alignment pad after the fd, so the
         * 64-bit (offset, nbytes) pairs start at arg3 and the flags
         * land in arg7. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        /* Other 32-bit ABIs: 64-bit values split across arg2..arg5. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* flags moved up to arg2 so the 64-bit pairs stay aligned. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* signalfd4(fd, mask, sizemask, flags); the helper takes
         * (fd, mask addr, flags). */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd is the flags == 0 case of signalfd4. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is only a legacy hint; pass it through. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* Translate guest flag bits (e.g. CLOEXEC) to host values. */
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                /* ADD/MOD carry a real event struct: swap it in from
                 * guest byte order.  For DEL 'ep' stays uninitialized,
                 * which is fine since the kernel ignores it. */
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
13386 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: both syscalls funnel into the host
         * epoll_pwait, differing only in the signal-mask handling. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound the event count before sizing any buffers. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-side staging array; results are swapped into the guest
         * layout afterwards. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            /* Temporarily install the guest-supplied sigmask for the
             * duration of the wait. */
            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; swap each into the
             * guest buffer and flush exactly that many entries. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* New limits for AS/DATA/STACK are deliberately not forwarded
         * (rnewp stays NULL, making the call read-only for those
         * resources) — presumably to protect QEMU's own address-space
         * requirements; confirm before relaxing. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* gethostname(buf, len): fill a guest buffer with the host's
         * hostname, failing with EFAULT if the buffer is unmappable. */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* cmpxchg(new, expected, ..., addr): compare *addr with arg2 and
         * store arg1 on match, returning the old value. */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            /* Do not fall through: mem_value is uninitialized here and
             * was previously compared and returned (undefined behavior).
             * The SIGSEGV is already queued; the return value is moot. */
            return 0xdeadbeef;
        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /*
         * Like the kernel implementation and the qemu arm barrier,
         * no-op this?
         */
        return 0;
#endif
13535 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Allocate a slot in the fixed host timer table; the guest gets
         * back a handle encoding TIMER_MAGIC plus the slot index. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* Conversion failed: release the reserved slot. */
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                /* Hand the encoded handle to the guest; on a faulting
                 * pointer, undo the host timer and the slot. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
13574 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        /* get_timer_id() decodes/validates the TIMER_MAGIC handle,
         * returning a negative error for a bogus one. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Old value copy-out is optional (arg4 may be NULL). */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* time64 variant: itimerspec fields carry a 64-bit time_t. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13628 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returned a (negative) target errno value. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied out even when the host call
             * failed (hspec then uninitialized) — matches existing behavior;
             * the guest sees the error in ret regardless. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value
         * 64-bit time_t variant of timer_gettime above. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13674 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returned a (negative) target errno value. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            /* Overrun count passes straight through from the host timer. */
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the slot unconditionally so the id can be reused,
             * even if the host timer_delete reported an error. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
13707 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Translate guest TFD_* flags to host values via fcntl_flags_tbl. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            /* Register a translator so reads of the expiry counter are
             * byte-swapped for the guest when needed. */
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy the current setting out if the guest supplied a buffer. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit time_t variant: only the guest-side layout differs. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13745 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* A NULL new_value is passed through to the host, which
             * rejects it — keeps the host kernel as the policy source. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Copy the previous setting back out only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit time_t variant of timerfd_settime above. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13791 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        /* Direct passthrough: which/who and the returned priority have the
         * same encoding on guest and host. */
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        /* fd + nstype pass through unchanged. */
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    case TARGET_NR_memfd_create:
        /* arg1 is a guest pointer to the name string. */
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* Drop any stale translator that may be attached to this fd number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif
13834 
#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            /* NULL offset pointers mean "use and update the fd's own
             * file position", so only read the ones the guest supplied. */
            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            /* On success the host updated the offsets; write them back. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif
13871 
#if defined(TARGET_NR_pivot_root)
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* unlock_user() is safe on the NULL returned by a failed lock. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

#if defined(TARGET_NR_riscv_hwprobe)
    case TARGET_NR_riscv_hwprobe:
        /* RISC-V specific hardware-capability probe. */
        return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
#endif

    default:
        /* Syscall number not (yet) emulated for this target. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
    }
    return ret;
}
13900 
13901 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13902                     abi_long arg2, abi_long arg3, abi_long arg4,
13903                     abi_long arg5, abi_long arg6, abi_long arg7,
13904                     abi_long arg8)
13905 {
13906     CPUState *cpu = env_cpu(cpu_env);
13907     abi_long ret;
13908 
13909 #ifdef DEBUG_ERESTARTSYS
13910     /* Debug-only code for exercising the syscall-restart code paths
13911      * in the per-architecture cpu main loops: restart every syscall
13912      * the guest makes once before letting it through.
13913      */
13914     {
13915         static bool flag;
13916         flag = !flag;
13917         if (flag) {
13918             return -QEMU_ERESTARTSYS;
13919         }
13920     }
13921 #endif
13922 
13923     record_syscall_start(cpu, num, arg1,
13924                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13925 
13926     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13927         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13928     }
13929 
13930     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13931                       arg5, arg6, arg7, arg8);
13932 
13933     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13934         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13935                           arg3, arg4, arg5, arg6);
13936     }
13937 
13938     record_syscall_return(cpu, num, ret);
13939     return ret;
13940 }
13941