xref: /openbmc/qemu/linux-user/syscall.c (revision dbdf841b)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
321           loff_t *, res, uint, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
struct sched_attr {
    uint32_t size;           /* size of this structure, for versioning */
    uint32_t sched_policy;   /* SCHED_NORMAL, SCHED_FIFO, etc. */
    uint64_t sched_flags;
    int32_t sched_nice;      /* nice value, used for SCHED_OTHER/BATCH */
    uint32_t sched_priority; /* static priority, used for SCHED_FIFO/RR */
    /* Remaining fields are the SCHED_DEADLINE parameters. */
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
/*
 * Translation table between guest (TARGET_O_*) and host (O_*) open(2)/
 * fcntl(2) flag encodings.  Each row pairs a guest mask/value with the
 * corresponding host mask/value; flags that may be absent on some hosts
 * are conditionally compiled.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
/*
 * Fallback for hosts without the renameat2 syscall.  A plain rename
 * (flags == 0) is forwarded to renameat(); any RENAME_* flag cannot be
 * emulated and fails with ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;  /* soft limit */
    uint64_t rlim_max;  /* hard limit (ceiling for rlim_cur) */
};
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
/*
 * Mark a previously claimed POSIX timer slot as free again.  The
 * store-release ensures earlier writes to the slot are visible before
 * the slot can be observed free by next_free_host_timer().
 */
static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
543 #endif
544 
/*
 * Convert a host errno value to the guest's numbering.  The case list
 * is generated from errnos.c.inc; values with no entry there are passed
 * through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
555 
/*
 * Convert a guest errno value to the host's numbering — the inverse of
 * host_to_target_errno(), generated from the same errnos.c.inc list.
 * Unknown values are passed through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
663               char **, argv, char **, envp, int, flags)
664 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
665     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
666 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
667               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
668 #endif
669 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
670 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
671               struct timespec *, tsp, const sigset_t *, sigmask,
672               size_t, sigsetsize)
673 #endif
674 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
675               int, maxevents, int, timeout, const sigset_t *, sigmask,
676               size_t, sigsetsize)
677 #if defined(__NR_futex)
678 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
679               const struct timespec *,timeout,int *,uaddr2,int,val3)
680 #endif
681 #if defined(__NR_futex_time64)
682 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
683               const struct timespec *,timeout,int *,uaddr2,int,val3)
684 #endif
685 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
686 safe_syscall2(int, kill, pid_t, pid, int, sig)
687 safe_syscall2(int, tkill, int, tid, int, sig)
688 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
689 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
690 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
692               unsigned long, pos_l, unsigned long, pos_h)
693 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
694               unsigned long, pos_l, unsigned long, pos_h)
695 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
696               socklen_t, addrlen)
697 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
698               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
699 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
700               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
701 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
702 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
703 safe_syscall2(int, flock, int, fd, int, operation)
704 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
705 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
706               const struct timespec *, uts, size_t, sigsetsize)
707 #endif
708 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
709               int, flags)
710 #if defined(TARGET_NR_nanosleep)
711 safe_syscall2(int, nanosleep, const struct timespec *, req,
712               struct timespec *, rem)
713 #endif
714 #if defined(TARGET_NR_clock_nanosleep) || \
715     defined(TARGET_NR_clock_nanosleep_time64)
716 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
717               const struct timespec *, req, struct timespec *, rem)
718 #endif
719 #ifdef __NR_ipc
720 #ifdef __s390x__
721 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
722               void *, ptr)
723 #else
724 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
725               void *, ptr, long, fifth)
726 #endif
727 #endif
728 #ifdef __NR_msgsnd
729 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
730               int, flags)
731 #endif
732 #ifdef __NR_msgrcv
733 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
734               long, msgtype, int, flags)
735 #endif
736 #ifdef __NR_semtimedop
737 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
738               unsigned, nsops, const struct timespec *, timeout)
739 #endif
740 #if defined(TARGET_NR_mq_timedsend) || \
741     defined(TARGET_NR_mq_timedsend_time64)
742 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
743               size_t, len, unsigned, prio, const struct timespec *, timeout)
744 #endif
745 #if defined(TARGET_NR_mq_timedreceive) || \
746     defined(TARGET_NR_mq_timedreceive_time64)
747 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
748               size_t, len, unsigned *, prio, const struct timespec *, timeout)
749 #endif
750 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
751 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
752               int, outfd, loff_t *, poutoff, size_t, length,
753               unsigned int, flags)
754 #endif
755 
756 /* We do ioctl like this rather than via safe_syscall3 to preserve the
757  * "third argument might be integer or pointer or not present" behaviour of
758  * the libc function.
759  */
760 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
761 /* Similarly for fcntl. Note that callers must always:
762  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
763  *  use the flock64 struct rather than unsuffixed flock
764  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
765  */
766 #ifdef __NR_fcntl64
767 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
768 #else
769 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
770 #endif
771 
772 static inline int host_to_target_sock_type(int host_type)
773 {
774     int target_type;
775 
776     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
777     case SOCK_DGRAM:
778         target_type = TARGET_SOCK_DGRAM;
779         break;
780     case SOCK_STREAM:
781         target_type = TARGET_SOCK_STREAM;
782         break;
783     default:
784         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
785         break;
786     }
787 
788 #if defined(SOCK_CLOEXEC)
789     if (host_type & SOCK_CLOEXEC) {
790         target_type |= TARGET_SOCK_CLOEXEC;
791     }
792 #endif
793 
794 #if defined(SOCK_NONBLOCK)
795     if (host_type & SOCK_NONBLOCK) {
796         target_type |= TARGET_SOCK_NONBLOCK;
797     }
798 #endif
799 
800     return target_type;
801 }
802 
803 static abi_ulong target_brk;
804 static abi_ulong brk_page;
805 
/*
 * Record the guest's initial program break and the corresponding
 * host-page-aligned top of the heap mapping used by do_brk().
 */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = new_brk;
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
811 
812 /* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        /* Nothing to map: the current heap already covers the request. */
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
896 
897 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
898     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
899 static inline abi_long copy_from_user_fdset(fd_set *fds,
900                                             abi_ulong target_fds_addr,
901                                             int n)
902 {
903     int i, nw, j, k;
904     abi_ulong b, *target_fds;
905 
906     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
907     if (!(target_fds = lock_user(VERIFY_READ,
908                                  target_fds_addr,
909                                  sizeof(abi_ulong) * nw,
910                                  1)))
911         return -TARGET_EFAULT;
912 
913     FD_ZERO(fds);
914     k = 0;
915     for (i = 0; i < nw; i++) {
916         /* grab the abi_ulong */
917         __get_user(b, &target_fds[i]);
918         for (j = 0; j < TARGET_ABI_BITS; j++) {
919             /* check the bit inside the abi_ulong */
920             if ((b >> j) & 1)
921                 FD_SET(k, fds);
922             k++;
923         }
924     }
925 
926     unlock_user(target_fds, target_fds_addr, 0);
927 
928     return 0;
929 }
930 
931 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
932                                                  abi_ulong target_fds_addr,
933                                                  int n)
934 {
935     if (target_fds_addr) {
936         if (copy_from_user_fdset(fds, target_fds_addr, n))
937             return -TARGET_EFAULT;
938         *fds_ptr = fds;
939     } else {
940         *fds_ptr = NULL;
941     }
942     return 0;
943 }
944 
945 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
946                                           const fd_set *fds,
947                                           int n)
948 {
949     int i, nw, j, k;
950     abi_long v;
951     abi_ulong *target_fds;
952 
953     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
954     if (!(target_fds = lock_user(VERIFY_WRITE,
955                                  target_fds_addr,
956                                  sizeof(abi_ulong) * nw,
957                                  0)))
958         return -TARGET_EFAULT;
959 
960     k = 0;
961     for (i = 0; i < nw; i++) {
962         v = 0;
963         for (j = 0; j < TARGET_ABI_BITS; j++) {
964             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
965             k++;
966         }
967         __put_user(v, &target_fds[i]);
968     }
969 
970     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
971 
972     return 0;
973 }
974 #endif
975 
/* Host USER_HZ: alpha hosts historically use 1024, everything else 100. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/*
 * Convert a host clock tick count (clock_t, e.g. from times(2)) to
 * target ticks, rescaling when host and target tick rates differ.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* widen to 64 bits so ticks * TARGET_HZ cannot overflow */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
990 
/*
 * Copy a host struct rusage out to guest memory at @target_addr,
 * byte-swapping every field for the target ABI.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1020 
1021 #ifdef TARGET_NR_setrlimit
1022 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1023 {
1024     abi_ulong target_rlim_swap;
1025     rlim_t result;
1026 
1027     target_rlim_swap = tswapal(target_rlim);
1028     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1029         return RLIM_INFINITY;
1030 
1031     result = target_rlim_swap;
1032     if (target_rlim_swap != (rlim_t)result)
1033         return RLIM_INFINITY;
1034 
1035     return result;
1036 }
1037 #endif
1038 
1039 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1040 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1041 {
1042     abi_ulong target_rlim_swap;
1043     abi_ulong result;
1044 
1045     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1046         target_rlim_swap = TARGET_RLIM_INFINITY;
1047     else
1048         target_rlim_swap = rlim;
1049     result = tswapal(target_rlim_swap);
1050 
1051     return result;
1052 }
1053 #endif
1054 
/*
 * Translate a target RLIMIT_* resource code to the host's numbering.
 * Unknown codes are passed through unchanged so the host syscall can
 * reject them itself.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    /* RLIMIT_RTTIME is not available on all host libcs */
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        /* unknown code: let the host syscall return EINVAL */
        return code;
    }
}
1096 
1097 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1098                                               abi_ulong target_tv_addr)
1099 {
1100     struct target_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __get_user(tv->tv_sec, &target_tv->tv_sec);
1107     __get_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 0);
1110 
1111     return 0;
1112 }
1113 
1114 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1115                                             const struct timeval *tv)
1116 {
1117     struct target_timeval *target_tv;
1118 
1119     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1120         return -TARGET_EFAULT;
1121     }
1122 
1123     __put_user(tv->tv_sec, &target_tv->tv_sec);
1124     __put_user(tv->tv_usec, &target_tv->tv_usec);
1125 
1126     unlock_user_struct(target_tv, target_tv_addr, 1);
1127 
1128     return 0;
1129 }
1130 
1131 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1132 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1133                                                 abi_ulong target_tv_addr)
1134 {
1135     struct target__kernel_sock_timeval *target_tv;
1136 
1137     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1138         return -TARGET_EFAULT;
1139     }
1140 
1141     __get_user(tv->tv_sec, &target_tv->tv_sec);
1142     __get_user(tv->tv_usec, &target_tv->tv_usec);
1143 
1144     unlock_user_struct(target_tv, target_tv_addr, 0);
1145 
1146     return 0;
1147 }
1148 #endif
1149 
1150 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1151                                               const struct timeval *tv)
1152 {
1153     struct target__kernel_sock_timeval *target_tv;
1154 
1155     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1156         return -TARGET_EFAULT;
1157     }
1158 
1159     __put_user(tv->tv_sec, &target_tv->tv_sec);
1160     __put_user(tv->tv_usec, &target_tv->tv_usec);
1161 
1162     unlock_user_struct(target_tv, target_tv_addr, 1);
1163 
1164     return 0;
1165 }
1166 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a struct timespec from guest memory into host *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 *
 * The guard above previously listed TARGET_NR_pselect6 twice; the
 * second occurrence was clearly meant to be TARGET_NR_pselect6_time64
 * (do_pselect6, also built for pselect6_time64-only targets, calls
 * this helper for the !time64 path).
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1190 
1191 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1192     defined(TARGET_NR_timer_settime64) || \
1193     defined(TARGET_NR_mq_timedsend_time64) || \
1194     defined(TARGET_NR_mq_timedreceive_time64) || \
1195     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1196     defined(TARGET_NR_clock_nanosleep_time64) || \
1197     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1198     defined(TARGET_NR_utimensat) || \
1199     defined(TARGET_NR_utimensat_time64) || \
1200     defined(TARGET_NR_semtimedop_time64) || \
1201     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a 64-bit struct __kernel_timespec from guest memory into host
 * *host_ts.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1217 #endif
1218 
1219 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1220                                                struct timespec *host_ts)
1221 {
1222     struct target_timespec *target_ts;
1223 
1224     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1225         return -TARGET_EFAULT;
1226     }
1227     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1228     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1229     unlock_user_struct(target_ts, target_addr, 1);
1230     return 0;
1231 }
1232 
1233 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1234                                                  struct timespec *host_ts)
1235 {
1236     struct target__kernel_timespec *target_ts;
1237 
1238     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1239         return -TARGET_EFAULT;
1240     }
1241     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1242     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1243     unlock_user_struct(target_ts, target_addr, 1);
1244     return 0;
1245 }
1246 
1247 #if defined(TARGET_NR_gettimeofday)
1248 static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
1249                                              struct timezone *tz)
1250 {
1251     struct target_timezone *target_tz;
1252 
1253     if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
1254         return -TARGET_EFAULT;
1255     }
1256 
1257     __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1258     __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1259 
1260     unlock_user_struct(target_tz, target_tz_addr, 1);
1261 
1262     return 0;
1263 }
1264 #endif
1265 
1266 #if defined(TARGET_NR_settimeofday)
1267 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1268                                                abi_ulong target_tz_addr)
1269 {
1270     struct target_timezone *target_tz;
1271 
1272     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1273         return -TARGET_EFAULT;
1274     }
1275 
1276     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1277     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1278 
1279     unlock_user_struct(target_tz, target_tz_addr, 0);
1280 
1281     return 0;
1282 }
1283 #endif
1284 
1285 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1286 #include <mqueue.h>
1287 
1288 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1289                                               abi_ulong target_mq_attr_addr)
1290 {
1291     struct target_mq_attr *target_mq_attr;
1292 
1293     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1294                           target_mq_attr_addr, 1))
1295         return -TARGET_EFAULT;
1296 
1297     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1298     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1299     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1300     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1301 
1302     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1303 
1304     return 0;
1305 }
1306 
1307 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1308                                             const struct mq_attr *attr)
1309 {
1310     struct target_mq_attr *target_mq_attr;
1311 
1312     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1313                           target_mq_attr_addr, 0))
1314         return -TARGET_EFAULT;
1315 
1316     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1317     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1318     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1319     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1320 
1321     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1322 
1323     return 0;
1324 }
1325 #endif
1326 
1327 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Copy in the three (each optional) fd sets from guest memory */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* select() takes a timeval but safe_pselect6() wants a timespec */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the (possibly modified) fd sets back to guest memory */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            /* write back the updated timeout, converted to a timeval */
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1384 
1385 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1386 static abi_long do_old_select(abi_ulong arg1)
1387 {
1388     struct target_sel_arg_struct *sel;
1389     abi_ulong inp, outp, exp, tvp;
1390     long nsel;
1391 
1392     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1393         return -TARGET_EFAULT;
1394     }
1395 
1396     nsel = tswapal(sel->n);
1397     inp = tswapal(sel->inp);
1398     outp = tswapal(sel->outp);
1399     exp = tswapal(sel->exp);
1400     tvp = tswapal(sel->tvp);
1401 
1402     unlock_user_struct(sel, arg1, 0);
1403 
1404     return do_select(nsel, inp, outp, exp, tvp);
1405 }
1406 #endif
1407 #endif
1408 
1409 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6/pselect6_time64.  @time64 selects whether the
 * guest timespec at arg5 uses the 64-bit __kernel_timespec layout.
 * Returns target values and target errnos.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Copy in the three (each optional) fd sets from guest memory */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* temporarily installs the guest's signal mask */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        /* restore the original signal mask */
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        /* Copy the (possibly modified) fd sets back to guest memory */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        /* write back the updated timeout in the caller's layout */
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1519 #endif
1520 
1521 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1522     defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll/ppoll/ppoll_time64.  @ppoll selects the ppoll variant
 * (timespec at arg3, sigset at arg4/arg5); otherwise arg3 is a timeout
 * in milliseconds.  @time64 selects the 64-bit guest timespec layout.
 * Returns target values and target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* reject sizes that would overflow the lock_user length */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        /* build the host pollfd array from the guest one */
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        /* NULL guest timespec means wait forever */
        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* temporarily installs the guest's signal mask */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        /*
         * NOTE(review): these -TARGET_EFAULT returns leave target_pfd
         * locked (no unlock_user as on the earlier error paths) —
         * confirm whether that is intentional.
         */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
          struct timespec ts, *pts;

          if (arg3 >= 0) {
              /* Convert ms to secs, ns */
              ts.tv_sec = arg3 / 1000;
              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
              pts = &ts;
          } else {
              /* -ve poll() timeout means "infinite" */
              pts = NULL;
          }
          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* copy the returned events back to the guest pollfd array */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
1618 #endif
1619 
/*
 * Implement pipe()/pipe2().  @pipedes is the guest address of the two
 * int fd slots; @flags is only meaningful for pipe2 (@is_pipe2 != 0).
 * Returns target values and target errnos.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
        /* these targets return fd[1] in a register and fd[0] as result */
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* everyone else: write both fds to the guest array */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1653 
1654 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1655                                               abi_ulong target_addr,
1656                                               socklen_t len)
1657 {
1658     struct target_ip_mreqn *target_smreqn;
1659 
1660     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1661     if (!target_smreqn)
1662         return -TARGET_EFAULT;
1663     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1664     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1665     if (len == sizeof(struct target_ip_mreqn))
1666         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1667     unlock_user(target_smreqn, target_addr, 0);
1668 
1669     return 0;
1670 }
1671 
/*
 * Convert a guest sockaddr at @target_addr into the host *addr,
 * byte-swapping family-specific fields.  A per-fd translator (if
 * registered) takes precedence.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* include the terminating NUL if the caller omitted it */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1733 
1734 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1735                                                struct sockaddr *addr,
1736                                                socklen_t len)
1737 {
1738     struct target_sockaddr *target_saddr;
1739 
1740     if (len == 0) {
1741         return 0;
1742     }
1743     assert(addr);
1744 
1745     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1746     if (!target_saddr)
1747         return -TARGET_EFAULT;
1748     memcpy(target_saddr, addr, len);
1749     if (len >= offsetof(struct target_sockaddr, sa_family) +
1750         sizeof(target_saddr->sa_family)) {
1751         target_saddr->sa_family = tswap16(addr->sa_family);
1752     }
1753     if (addr->sa_family == AF_NETLINK &&
1754         len >= sizeof(struct target_sockaddr_nl)) {
1755         struct target_sockaddr_nl *target_nl =
1756                (struct target_sockaddr_nl *)target_saddr;
1757         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1758         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1759     } else if (addr->sa_family == AF_PACKET) {
1760         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1761         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1762         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1763     } else if (addr->sa_family == AF_INET6 &&
1764                len >= sizeof(struct target_sockaddr_in6)) {
1765         struct target_sockaddr_in6 *target_in6 =
1766                (struct target_sockaddr_in6 *)target_saddr;
1767         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1768     }
1769     unlock_user(target_saddr, target_addr, len);
1770 
1771     return 0;
1772 }
1773 
/*
 * Convert the ancillary-data (control message) area of a guest msghdr
 * into the host msghdr, ready for sendmsg(2).  The caller must already
 * have sized msgh->msg_controllen; on return it is set to the number of
 * host bytes actually filled in.
 *
 * Returns 0 on success or -TARGET_EFAULT if the guest control buffer
 * cannot be locked.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* An empty/short control area means "no ancillary data". */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg, excluding its header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET's numeric value differs between targets; map it. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* SCM_RIGHTS payload: an array of file descriptors. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* SCM_CREDENTIALS payload: a struct ucred, field by field. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload type: copy it through unconverted and
             * log, so new kernel features degrade rather than fail.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1867 
/*
 * Convert the ancillary data returned by the host (e.g. from recvmsg(2))
 * into the guest msghdr's control buffer, converting each supported
 * (cmsg_level, cmsg_type) payload and setting MSG_CTRUNC in the guest
 * flags when the guest buffer is too small.  On return the guest
 * msg_controllen holds the number of guest bytes written.
 *
 * Returns 0 on success or -TARGET_EFAULT if the guest control buffer
 * cannot be locked.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* An empty/short guest control area means "deliver no ancillary data". */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length, excluding the host cmsg header. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET's numeric value differs between targets; map it. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval layout differs between host and guest. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Clamp the payload to the remaining guest space, flagging
         * truncation to the guest as the kernel would.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Array of file descriptors; swap each one. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err followed by the
                 * offending peer's address, as the kernel lays it out.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 analogue of the IP_RECVERR layout above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra guest
             * space so we never leak uninitialized bytes to the guest.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the guest-side space consumed; the final cmsg may
         * be truncated to exactly the remaining guest buffer.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2099 
/* do_setsockopt() Must return target values and target errnos.
 *
 * Translate a guest setsockopt(2) into a host call: for each supported
 * (level, optname) pair, byte-swap and re-layout the option value from
 * guest memory before handing it to the host kernel.  Unsupported
 * options are logged and fail with -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        /* Options below take an 'int', but the kernel also accepts a
         * single byte for them (see ip(7)); mirror that leniency.
         */
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accept either the short ip_mreq or full ip_mreqn form;
             * target_to_host_ip_mreq does the layout conversion.
             */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* struct ip_mreq_source contains only in_addr fields, which
             * are network byte order on both sides: pass through as-is.
             */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        /* All of the following take a plain 'int' value. */
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* ipi6_addr is network byte order; only the interface
             * index needs swapping.
             */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* The multicast address itself is network byte order. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            /* The filter is an array of eight 32-bit words; swap each. */
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key material is an opaque byte string: no swapping needed. */
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* The auth size is passed in optlen itself; optval is NULL. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        /* The guest's SOL_SOCKET constant and option numbers may differ
         * from the host's, so we dispatch on TARGET_SO_* and translate
         * each optname before calling the host.
         */
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

/* Shared tail for SO_RCVTIMEO and SO_SNDTIMEO: both take a timeval. */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Classic BPF program: convert the fprog header and
                 * byte-swap each instruction's 16/32-bit fields.
                 */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		/* Interface name: copy to a NUL-terminated local buffer,
		 * silently truncating to IFNAMSIZ-1 as the kernel would.
		 */
		char *dev_ifname, *addr_ifname;

		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                /* struct linger: two ints, converted field by field. */
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	/* Common tail for all plain-'int' SOL_SOCKET options above. */
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        /* All supported netlink options take an 'int' value. */
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            break;
        default:
            goto unimplemented;
        }
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2562 
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: translates the target-level
 * socket level/option names to host values, performs the host call,
 * and converts the returned value (and the value-result length at
 * guest address @optlen) back into the target representation.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

/* Shared path for SO_RCVTIMEO/SO_SNDTIMEO: fetch a host struct timeval
 * and convert it to the target layout, truncating to the guest length. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Copy pid/uid/gid field by field so each gets byte-swapped
             * into the target representation. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* Security context is an opaque string; pass it through
             * unmodified into the guest buffer. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) looks intended as sizeof(val); the
         * two types are the same size on supported hosts so the call is
         * unaffected -- confirm before changing. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        /* A few options return host-specific encodings that must be
         * mapped to the target's values. */
        switch (optname) {
        case SO_TYPE:
            val = host_to_target_sock_type(val);
            break;
        case SO_ERROR:
            val = host_to_target_errno(val);
            break;
        }
        if (len > lv)
            len = lv;
        /* The kernel permits a 1-byte read of int-valued options;
         * emulate that by storing only the low byte. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            /* NOTE(review): same sizeof(lv)-vs-sizeof(val) quirk as in
             * int_case above. */
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Short reads of small values are served as a single byte,
             * matching the kernel's behaviour for these options. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            /* NOTE(review): same sizeof(lv)-vs-sizeof(val) quirk as in
             * int_case above. */
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            /* Variable-length array of 32-bit group bitmap words. */
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): this early return skips the
             * unlock_user(results, ...) below -- verify whether the
             * lock is leaked on this path. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2985 
2986 /* Convert target low/high pair representing file offset into the host
2987  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2988  * as the kernel doesn't handle them either.
2989  */
2990 static void target_to_host_low_high(abi_ulong tlow,
2991                                     abi_ulong thigh,
2992                                     unsigned long *hlow,
2993                                     unsigned long *hhigh)
2994 {
2995     uint64_t off = tlow |
2996         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2997         TARGET_LONG_BITS / 2;
2998 
2999     *hlow = off;
3000     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3001 }
3002 
/*
 * Build a host struct iovec array from a guest iovec array at
 * @target_addr, locking each referenced guest buffer into host memory.
 *
 * Returns the array on success (release with unlock_iovec()).  On
 * failure returns NULL with errno set; callers translate it with
 * host_to_target_errno().  A @count of 0 returns NULL with errno == 0,
 * which is not an error.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so it never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Roll back: unlock every buffer locked so far (entries with a
     * non-positive guest length were never locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3090 
3091 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3092                          abi_ulong count, int copy)
3093 {
3094     struct target_iovec *target_vec;
3095     int i;
3096 
3097     target_vec = lock_user(VERIFY_READ, target_addr,
3098                            count * sizeof(struct target_iovec), 1);
3099     if (target_vec) {
3100         for (i = 0; i < count; i++) {
3101             abi_ulong base = tswapal(target_vec[i].iov_base);
3102             abi_long len = tswapal(target_vec[i].iov_len);
3103             if (len < 0) {
3104                 break;
3105             }
3106             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3107         }
3108         unlock_user(target_vec, target_addr, 0);
3109     }
3110 
3111     g_free(vec);
3112 }
3113 
3114 static inline int target_to_host_sock_type(int *type)
3115 {
3116     int host_type = 0;
3117     int target_type = *type;
3118 
3119     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3120     case TARGET_SOCK_DGRAM:
3121         host_type = SOCK_DGRAM;
3122         break;
3123     case TARGET_SOCK_STREAM:
3124         host_type = SOCK_STREAM;
3125         break;
3126     default:
3127         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3128         break;
3129     }
3130     if (target_type & TARGET_SOCK_CLOEXEC) {
3131 #if defined(SOCK_CLOEXEC)
3132         host_type |= SOCK_CLOEXEC;
3133 #else
3134         return -TARGET_EINVAL;
3135 #endif
3136     }
3137     if (target_type & TARGET_SOCK_NONBLOCK) {
3138 #if defined(SOCK_NONBLOCK)
3139         host_type |= SOCK_NONBLOCK;
3140 #elif !defined(O_NONBLOCK)
3141         return -TARGET_EINVAL;
3142 #endif
3143     }
3144     *type = host_type;
3145     return 0;
3146 }
3147 
3148 /* Try to emulate socket type flags after socket creation.  */
3149 static int sock_flags_fixup(int fd, int target_type)
3150 {
3151 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3152     if (target_type & TARGET_SOCK_NONBLOCK) {
3153         int flags = fcntl(fd, F_GETFL);
3154         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3155             close(fd);
3156             return -TARGET_EINVAL;
3157         }
3158     }
3159 #endif
3160     return fd;
3161 }
3162 
3163 /* do_socket() Must return target values and target errnos. */
3164 static abi_long do_socket(int domain, int type, int protocol)
3165 {
3166     int target_type = type;
3167     int ret;
3168 
3169     ret = target_to_host_sock_type(&type);
3170     if (ret) {
3171         return ret;
3172     }
3173 
3174     if (domain == PF_NETLINK && !(
3175 #ifdef CONFIG_RTNETLINK
3176          protocol == NETLINK_ROUTE ||
3177 #endif
3178          protocol == NETLINK_KOBJECT_UEVENT ||
3179          protocol == NETLINK_AUDIT)) {
3180         return -TARGET_EPROTONOSUPPORT;
3181     }
3182 
3183     if (domain == AF_PACKET ||
3184         (domain == AF_INET && type == SOCK_PACKET)) {
3185         protocol = tswap16(protocol);
3186     }
3187 
3188     ret = get_errno(socket(domain, type, protocol));
3189     if (ret >= 0) {
3190         ret = sock_flags_fixup(ret, target_type);
3191         if (type == SOCK_PACKET) {
3192             /* Manage an obsolete case :
3193              * if socket type is SOCK_PACKET, bind by name
3194              */
3195             fd_trans_register(ret, &target_packet_trans);
3196         } else if (domain == PF_NETLINK) {
3197             switch (protocol) {
3198 #ifdef CONFIG_RTNETLINK
3199             case NETLINK_ROUTE:
3200                 fd_trans_register(ret, &target_netlink_route_trans);
3201                 break;
3202 #endif
3203             case NETLINK_KOBJECT_UEVENT:
3204                 /* nothing to do: messages are strings */
3205                 break;
3206             case NETLINK_AUDIT:
3207                 fd_trans_register(ret, &target_netlink_audit_trans);
3208                 break;
3209             default:
3210                 g_assert_not_reached();
3211             }
3212         }
3213     }
3214     return ret;
3215 }
3216 
3217 /* do_bind() Must return target values and target errnos. */
3218 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3219                         socklen_t addrlen)
3220 {
3221     void *addr;
3222     abi_long ret;
3223 
3224     if ((int)addrlen < 0) {
3225         return -TARGET_EINVAL;
3226     }
3227 
3228     addr = alloca(addrlen+1);
3229 
3230     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3231     if (ret)
3232         return ret;
3233 
3234     return get_errno(bind(sockfd, addr, addrlen));
3235 }
3236 
3237 /* do_connect() Must return target values and target errnos. */
3238 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3239                            socklen_t addrlen)
3240 {
3241     void *addr;
3242     abi_long ret;
3243 
3244     if ((int)addrlen < 0) {
3245         return -TARGET_EINVAL;
3246     }
3247 
3248     addr = alloca(addrlen+1);
3249 
3250     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3251     if (ret)
3252         return ret;
3253 
3254     return get_errno(safe_connect(sockfd, addr, addrlen));
3255 }
3256 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: @msgp is the already-locked guest
 * msghdr; @send selects sendmsg (non-zero) vs recvmsg (zero).
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): the factor 2 is presumably headroom for host
     * control messages being larger than the target encoding --
     * confirm against target_to_host_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* This fd's data needs translation; run the payload of the
             * first iovec entry through the registered translator on a
             * private copy before sending. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                /* Reflect the updated header fields back to the guest;
                 * (void *)-1 marks the deliberately-bad name above. */
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3369 
3370 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3371                                int flags, int send)
3372 {
3373     abi_long ret;
3374     struct target_msghdr *msgp;
3375 
3376     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3377                           msgp,
3378                           target_msg,
3379                           send ? 1 : 0)) {
3380         return -TARGET_EFAULT;
3381     }
3382     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3383     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3384     return ret;
3385 }
3386 
3387 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3388  * so it might not have this *mmsg-specific flag either.
3389  */
3390 #ifndef MSG_WAITFORONE
3391 #define MSG_WAITFORONE 0x10000
3392 #endif
3393 
3394 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3395                                 unsigned int vlen, unsigned int flags,
3396                                 int send)
3397 {
3398     struct target_mmsghdr *mmsgp;
3399     abi_long ret = 0;
3400     int i;
3401 
3402     if (vlen > UIO_MAXIOV) {
3403         vlen = UIO_MAXIOV;
3404     }
3405 
3406     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3407     if (!mmsgp) {
3408         return -TARGET_EFAULT;
3409     }
3410 
3411     for (i = 0; i < vlen; i++) {
3412         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3413         if (is_error(ret)) {
3414             break;
3415         }
3416         mmsgp[i].msg_len = tswap32(ret);
3417         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3418         if (flags & MSG_WAITFORONE) {
3419             flags |= MSG_DONTWAIT;
3420         }
3421     }
3422 
3423     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3424 
3425     /* Return number of datagrams sent if we sent any at all;
3426      * otherwise return the error.
3427      */
3428     if (i) {
3429         return i;
3430     }
3431     return ret;
3432 }
3433 
3434 /* do_accept4() Must return target values and target errnos. */
3435 static abi_long do_accept4(int fd, abi_ulong target_addr,
3436                            abi_ulong target_addrlen_addr, int flags)
3437 {
3438     socklen_t addrlen, ret_addrlen;
3439     void *addr;
3440     abi_long ret;
3441     int host_flags;
3442 
3443     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3444         return -TARGET_EINVAL;
3445     }
3446 
3447     host_flags = 0;
3448     if (flags & TARGET_SOCK_NONBLOCK) {
3449         host_flags |= SOCK_NONBLOCK;
3450     }
3451     if (flags & TARGET_SOCK_CLOEXEC) {
3452         host_flags |= SOCK_CLOEXEC;
3453     }
3454 
3455     if (target_addr == 0) {
3456         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3457     }
3458 
3459     /* linux returns EFAULT if addrlen pointer is invalid */
3460     if (get_user_u32(addrlen, target_addrlen_addr))
3461         return -TARGET_EFAULT;
3462 
3463     if ((int)addrlen < 0) {
3464         return -TARGET_EINVAL;
3465     }
3466 
3467     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3468         return -TARGET_EFAULT;
3469     }
3470 
3471     addr = alloca(addrlen);
3472 
3473     ret_addrlen = addrlen;
3474     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3475     if (!is_error(ret)) {
3476         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3477         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3478             ret = -TARGET_EFAULT;
3479         }
3480     }
3481     return ret;
3482 }
3483 
3484 /* do_getpeername() Must return target values and target errnos. */
3485 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3486                                abi_ulong target_addrlen_addr)
3487 {
3488     socklen_t addrlen, ret_addrlen;
3489     void *addr;
3490     abi_long ret;
3491 
3492     if (get_user_u32(addrlen, target_addrlen_addr))
3493         return -TARGET_EFAULT;
3494 
3495     if ((int)addrlen < 0) {
3496         return -TARGET_EINVAL;
3497     }
3498 
3499     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3500         return -TARGET_EFAULT;
3501     }
3502 
3503     addr = alloca(addrlen);
3504 
3505     ret_addrlen = addrlen;
3506     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3507     if (!is_error(ret)) {
3508         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3509         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3510             ret = -TARGET_EFAULT;
3511         }
3512     }
3513     return ret;
3514 }
3515 
3516 /* do_getsockname() Must return target values and target errnos. */
3517 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3518                                abi_ulong target_addrlen_addr)
3519 {
3520     socklen_t addrlen, ret_addrlen;
3521     void *addr;
3522     abi_long ret;
3523 
3524     if (get_user_u32(addrlen, target_addrlen_addr))
3525         return -TARGET_EFAULT;
3526 
3527     if ((int)addrlen < 0) {
3528         return -TARGET_EINVAL;
3529     }
3530 
3531     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3532         return -TARGET_EFAULT;
3533     }
3534 
3535     addr = alloca(addrlen);
3536 
3537     ret_addrlen = addrlen;
3538     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3539     if (!is_error(ret)) {
3540         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3541         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3542             ret = -TARGET_EFAULT;
3543         }
3544     }
3545     return ret;
3546 }
3547 
3548 /* do_socketpair() Must return target values and target errnos. */
3549 static abi_long do_socketpair(int domain, int type, int protocol,
3550                               abi_ulong target_tab_addr)
3551 {
3552     int tab[2];
3553     abi_long ret;
3554 
3555     target_to_host_sock_type(&type);
3556 
3557     ret = get_errno(socketpair(domain, type, protocol, tab));
3558     if (!is_error(ret)) {
3559         if (put_user_s32(tab[0], target_tab_addr)
3560             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3561             ret = -TARGET_EFAULT;
3562     }
3563     return ret;
3564 }
3565 
/*
 * do_sendto() Must return target values and target errnos.
 *
 * Emulates sendto(2).  msg/len describe the guest buffer; target_addr is
 * the optional guest sockaddr.  If the fd has a registered target-to-host
 * data translator, the payload is translated in a private copy so the
 * guest's buffer is left untouched.
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* Translate a heap copy rather than the locked guest buffer. */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1: extra byte for target_to_host_sockaddr() -- presumably room
         * for a terminating NUL on AF_UNIX paths; confirm in that helper. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we translated into a private copy, free it and restore the
     * original pointer so the guest buffer is unlocked correctly. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3609 
/*
 * do_recvfrom() Must return target values and target errnos.
 *
 * Emulates recvfrom(2).  msg/len describe the guest receive buffer (msg
 * may be 0, in which case a NULL buffer is passed to the host kernel);
 * target_addr/target_addrlen optionally receive the source address.
 * Received data may be post-processed by the fd's registered
 * host-to-target translator.
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        /* Caller wants the source address: read its buffer length. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate only the bytes actually received. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Truncate to the guest buffer, but report the real length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: unlock with copy-back of the received data. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: unlock without copying anything back to the guest. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3670 
#ifdef TARGET_NR_socketcall
/*
 * do_socketcall() must return target values and target errnos.
 *
 * Demultiplexes the legacy socketcall(2) syscall: num selects the socket
 * operation and vptr points at its packed abi_long argument vector in
 * guest memory, validated against nargs[] before being read.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3763 
/* Fixed-size table of guest SysV shared-memory attachments; presumably
 * maintained by the shmat/shmdt emulation (users are outside this view). */
#define N_SHM_REGIONS	32

static struct shm_region {
    abi_ulong start;    /* guest address the segment is attached at */
    abi_ulong size;     /* length of the mapping in bytes */
    bool in_use;        /* slot occupied */
} shm_regions[N_SHM_REGIONS];
3771 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;      /* ownership and permissions */
  abi_ulong sem_otime;                  /* last semop() time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;                  /* pad word on 32-bit ABIs */
#endif
  abi_ulong sem_ctime;                  /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;                  /* pad word on 32-bit ABIs */
#endif
  abi_ulong sem_nsems;                  /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3790 
3791 static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
3792                                                abi_ulong target_addr)
3793 {
3794     struct target_ipc_perm *target_ip;
3795     struct target_semid64_ds *target_sd;
3796 
3797     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3798         return -TARGET_EFAULT;
3799     target_ip = &(target_sd->sem_perm);
3800     host_ip->__key = tswap32(target_ip->__key);
3801     host_ip->uid = tswap32(target_ip->uid);
3802     host_ip->gid = tswap32(target_ip->gid);
3803     host_ip->cuid = tswap32(target_ip->cuid);
3804     host_ip->cgid = tswap32(target_ip->cgid);
3805 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3806     host_ip->mode = tswap32(target_ip->mode);
3807 #else
3808     host_ip->mode = tswap16(target_ip->mode);
3809 #endif
3810 #if defined(TARGET_PPC)
3811     host_ip->__seq = tswap32(target_ip->__seq);
3812 #else
3813     host_ip->__seq = tswap16(target_ip->__seq);
3814 #endif
3815     unlock_user_struct(target_sd, target_addr, 0);
3816     return 0;
3817 }
3818 
3819 static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
3820                                                struct ipc_perm *host_ip)
3821 {
3822     struct target_ipc_perm *target_ip;
3823     struct target_semid64_ds *target_sd;
3824 
3825     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3826         return -TARGET_EFAULT;
3827     target_ip = &(target_sd->sem_perm);
3828     target_ip->__key = tswap32(host_ip->__key);
3829     target_ip->uid = tswap32(host_ip->uid);
3830     target_ip->gid = tswap32(host_ip->gid);
3831     target_ip->cuid = tswap32(host_ip->cuid);
3832     target_ip->cgid = tswap32(host_ip->cgid);
3833 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
3834     target_ip->mode = tswap32(host_ip->mode);
3835 #else
3836     target_ip->mode = tswap16(host_ip->mode);
3837 #endif
3838 #if defined(TARGET_PPC)
3839     target_ip->__seq = tswap32(host_ip->__seq);
3840 #else
3841     target_ip->__seq = tswap16(host_ip->__seq);
3842 #endif
3843     unlock_user_struct(target_sd, target_addr, 1);
3844     return 0;
3845 }
3846 
3847 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3848                                                abi_ulong target_addr)
3849 {
3850     struct target_semid64_ds *target_sd;
3851 
3852     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3853         return -TARGET_EFAULT;
3854     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3855         return -TARGET_EFAULT;
3856     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3857     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3858     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3859     unlock_user_struct(target_sd, target_addr, 0);
3860     return 0;
3861 }
3862 
3863 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3864                                                struct semid_ds *host_sd)
3865 {
3866     struct target_semid64_ds *target_sd;
3867 
3868     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3869         return -TARGET_EFAULT;
3870     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3871         return -TARGET_EFAULT;
3872     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3873     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3874     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3875     unlock_user_struct(target_sd, target_addr, 1);
3876     return 0;
3877 }
3878 
/* Guest layout of struct seminfo, returned by semctl(IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3891 
3892 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3893                                               struct seminfo *host_seminfo)
3894 {
3895     struct target_seminfo *target_seminfo;
3896     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3897         return -TARGET_EFAULT;
3898     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3899     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3900     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3901     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3902     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3903     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3904     __put_user(host_seminfo->semume, &target_seminfo->semume);
3905     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3906     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3907     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3908     unlock_user_struct(target_seminfo, target_addr, 1);
3909     return 0;
3910 }
3911 
/* Host-side semctl() argument union (see semctl(2)). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of the same union: pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3925 
3926 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3927                                                abi_ulong target_addr)
3928 {
3929     int nsems;
3930     unsigned short *array;
3931     union semun semun;
3932     struct semid_ds semid_ds;
3933     int i, ret;
3934 
3935     semun.buf = &semid_ds;
3936 
3937     ret = semctl(semid, 0, IPC_STAT, semun);
3938     if (ret == -1)
3939         return get_errno(ret);
3940 
3941     nsems = semid_ds.sem_nsems;
3942 
3943     *host_array = g_try_new(unsigned short, nsems);
3944     if (!*host_array) {
3945         return -TARGET_ENOMEM;
3946     }
3947     array = lock_user(VERIFY_READ, target_addr,
3948                       nsems*sizeof(unsigned short), 1);
3949     if (!array) {
3950         g_free(*host_array);
3951         return -TARGET_EFAULT;
3952     }
3953 
3954     for(i=0; i<nsems; i++) {
3955         __get_user((*host_array)[i], &array[i]);
3956     }
3957     unlock_user(array, target_addr, 0);
3958 
3959     return 0;
3960 }
3961 
3962 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3963                                                unsigned short **host_array)
3964 {
3965     int nsems;
3966     unsigned short *array;
3967     union semun semun;
3968     struct semid_ds semid_ds;
3969     int i, ret;
3970 
3971     semun.buf = &semid_ds;
3972 
3973     ret = semctl(semid, 0, IPC_STAT, semun);
3974     if (ret == -1)
3975         return get_errno(ret);
3976 
3977     nsems = semid_ds.sem_nsems;
3978 
3979     array = lock_user(VERIFY_WRITE, target_addr,
3980                       nsems*sizeof(unsigned short), 0);
3981     if (!array)
3982         return -TARGET_EFAULT;
3983 
3984     for(i=0; i<nsems; i++) {
3985         __put_user((*host_array)[i], &array[i]);
3986     }
3987     g_free(*host_array);
3988     unlock_user(array, target_addr, 1);
3989 
3990     return 0;
3991 }
3992 
/*
 * Emulate semctl(2).  Only the low byte of cmd selects the operation
 * (higher modifier bits, e.g. an IPC_64 flag, are masked off -- confirm
 * against the target's ipc definitions).  target_arg is the guest's
 * semun value, decoded per command; struct/array arguments are converted
 * in both directions around the host call.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* target_to_host_semarray() allocates 'array'; the matching
             * host_to_target_semarray() call below releases it. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* Commands that take no semun argument at all. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
4062 
/* Guest layout of struct sembuf, one semop(2) operation descriptor. */
struct target_sembuf {
    unsigned short sem_num;     /* semaphore index within the set */
    short sem_op;               /* operation value */
    short sem_flg;              /* operation flags (IPC_NOWAIT, SEM_UNDO) */
};
4068 
4069 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4070                                              abi_ulong target_addr,
4071                                              unsigned nsops)
4072 {
4073     struct target_sembuf *target_sembuf;
4074     int i;
4075 
4076     target_sembuf = lock_user(VERIFY_READ, target_addr,
4077                               nsops*sizeof(struct target_sembuf), 1);
4078     if (!target_sembuf)
4079         return -TARGET_EFAULT;
4080 
4081     for(i=0; i<nsops; i++) {
4082         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4083         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4084         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4085     }
4086 
4087     unlock_user(target_sembuf, target_addr, 0);
4088 
4089     return 0;
4090 }
4091 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop().  ptr addresses the guest sembuf array of
 * nsops entries; timeout, when non-zero, addresses a guest timespec
 * (decoded as 64-bit when time64 is set).  Falls back to the multiplexed
 * ipc(2) syscall on hosts without a native semtimedop syscall.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Cap the operation count like the kernel's SEMOPM limit. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4154 
/*
 * Guest layout of msqid_ds.  On 32-bit ABIs each time field is followed
 * by an unused pad word, mirroring the kernel's 64-bit-time layout.
 */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;
    abi_ulong msg_stime;        /* last msgsnd() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;
#endif
    abi_ulong msg_rtime;        /* last msgrcv() time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;        /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;     /* bytes currently queued */
    abi_ulong msg_qnum;         /* messages currently queued */
    abi_ulong msg_qbytes;       /* max bytes allowed on queue */
    abi_ulong msg_lspid;        /* pid of last msgsnd() */
    abi_ulong msg_lrpid;        /* pid of last msgrcv() */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4178 
4179 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4180                                                abi_ulong target_addr)
4181 {
4182     struct target_msqid_ds *target_md;
4183 
4184     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4185         return -TARGET_EFAULT;
4186     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4187         return -TARGET_EFAULT;
4188     host_md->msg_stime = tswapal(target_md->msg_stime);
4189     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4190     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4191     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4192     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4193     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4194     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4195     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4196     unlock_user_struct(target_md, target_addr, 0);
4197     return 0;
4198 }
4199 
4200 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4201                                                struct msqid_ds *host_md)
4202 {
4203     struct target_msqid_ds *target_md;
4204 
4205     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4206         return -TARGET_EFAULT;
4207     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4208         return -TARGET_EFAULT;
4209     target_md->msg_stime = tswapal(host_md->msg_stime);
4210     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4211     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4212     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4213     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4214     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4215     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4216     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4217     unlock_user_struct(target_md, target_addr, 1);
4218     return 0;
4219 }
4220 
/* Guest layout of struct msginfo, returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4231 
4232 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4233                                               struct msginfo *host_msginfo)
4234 {
4235     struct target_msginfo *target_msginfo;
4236     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4237         return -TARGET_EFAULT;
4238     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4239     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4240     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4241     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4242     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4243     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4244     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4245     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4246     unlock_user_struct(target_msginfo, target_addr, 1);
4247     return 0;
4248 }
4249 
/*
 * Emulate msgctl(2).  Only the low byte of cmd selects the operation
 * (higher modifier bits are masked off).  ptr is the guest buffer for
 * the command's msqid_ds/msginfo argument, converted in both directions
 * around the host call.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel returns a struct msginfo through the msqid_ds slot. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4281 
/* Guest layout of struct msgbuf; mtext is really msgsz bytes long. */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 for msgsnd() */
    char	mtext[1];
};
4286 
/*
 * Emulate msgsnd(2).  msgp addresses a guest target_msgbuf (mtype
 * followed by msgsz bytes of mtext), which is copied into a host msgbuf
 * with a byteswapped mtype before the host call.  Falls back to the
 * multiplexed ipc(2) syscall where __NR_msgsnd is unavailable.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    /* NOTE(review): copy flag is 0 although target_mb is read below --
     * this only matters for DEBUG_REMAP builds; confirm intended. */
    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* sizeof(long) accounts for the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390 sys_ipc variant takes no trailing dummy argument. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4327 
#ifdef __NR_ipc
/*
 * Build the trailing ipc(2) arguments for IPCOP_msgrcv.  The generic ABI
 * passes a two-element "kludge" array of {msgp, msgtyp} plus a dummy
 * fifth argument; SPARC and s390x differ as noted below.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4341 
/*
 * Emulate msgrcv(2).  The message is received into a host msgbuf, then
 * the mtext bytes and byteswapped mtype are copied back into the guest's
 * target_msgbuf at msgp.  Falls back to the multiplexed ipc(2) syscall
 * where __NR_msgrcv is unavailable.  Returns the byte count or a target
 * errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* sizeof(long) accounts for the host mtype field preceding mtext. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret is the number of mtext bytes received; copy them out just
         * past the guest's mtype field. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (lock failure returned early);
     * the guard is merely defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4393 
4394 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4395                                                abi_ulong target_addr)
4396 {
4397     struct target_shmid_ds *target_sd;
4398 
4399     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4400         return -TARGET_EFAULT;
4401     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4402         return -TARGET_EFAULT;
4403     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4404     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4405     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4406     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4407     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4408     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4409     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4410     unlock_user_struct(target_sd, target_addr, 0);
4411     return 0;
4412 }
4413 
4414 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4415                                                struct shmid_ds *host_sd)
4416 {
4417     struct target_shmid_ds *target_sd;
4418 
4419     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4420         return -TARGET_EFAULT;
4421     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4422         return -TARGET_EFAULT;
4423     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4424     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4425     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4426     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4427     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4428     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4429     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4430     unlock_user_struct(target_sd, target_addr, 1);
4431     return 0;
4432 }
4433 
/*
 * Target-ABI layout of struct shminfo, as returned to the guest for
 * shmctl(IPC_INFO); field meanings per shmctl(2).
 */
struct  target_shminfo {
    abi_ulong shmmax; /* maximum segment size in bytes */
    abi_ulong shmmin; /* minimum segment size in bytes */
    abi_ulong shmmni; /* maximum number of segments system-wide */
    abi_ulong shmseg; /* maximum number of segments per process */
    abi_ulong shmall; /* maximum total shared memory, in pages */
};
4441 
4442 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4443                                               struct shminfo *host_shminfo)
4444 {
4445     struct target_shminfo *target_shminfo;
4446     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4447         return -TARGET_EFAULT;
4448     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4449     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4450     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4451     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4452     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4453     unlock_user_struct(target_shminfo, target_addr, 1);
4454     return 0;
4455 }
4456 
/*
 * Target-ABI layout of struct shm_info, as returned to the guest for
 * shmctl(SHM_INFO); field meanings per shmctl(2).
 */
struct target_shm_info {
    int used_ids;             /* number of currently existing segments */
    abi_ulong shm_tot;        /* total shared memory, in pages */
    abi_ulong shm_rss;        /* resident shared memory, in pages */
    abi_ulong shm_swp;        /* swapped shared memory, in pages */
    abi_ulong swap_attempts;  /* unused since Linux 2.4 */
    abi_ulong swap_successes; /* unused since Linux 2.4 */
};
4465 
4466 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4467                                                struct shm_info *host_shm_info)
4468 {
4469     struct target_shm_info *target_shm_info;
4470     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4471         return -TARGET_EFAULT;
4472     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4473     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4474     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4475     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4476     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4477     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4478     unlock_user_struct(target_shm_info, target_addr, 1);
4479     return 0;
4480 }
4481 
/*
 * Emulate shmctl(2) for the guest.  buf is a guest pointer to the
 * command-specific argument structure (ignored for IPC_RMID/SHM_LOCK/
 * SHM_UNLOCK).  Returns the host result or a negative target errno;
 * unrecognized commands yield -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Strip flag bits such as IPC_64; the host syscall takes the bare cmd. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* These exchange a struct shmid_ds in both directions. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        /*
         * NOTE(review): dsarg is converted back to the guest even when the
         * host call failed, so the guest buffer may be rewritten with the
         * data it passed in — confirm this matches intended behavior.
         */
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* IPC_INFO fills a struct shminfo despite the shmid_ds prototype. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise, SHM_INFO returns a struct shm_info. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* No argument structure for these commands. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4520 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: the guest's SHMLBA is one target page. */
    return TARGET_PAGE_SIZE;
}
#endif
4540 
/*
 * Emulate shmat(2) for the guest.  Attaches segment shmid at guest
 * address shmaddr (or picks a free guest range when shmaddr is 0),
 * updates QEMU's page flags, and records the mapping for a later
 * do_shmdt().  Returns the guest attach address or a negative target
 * errno.
 */
static inline abi_ulong do_shmat(CPUArchState *cpu_env,
                                 int shmid, abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_long raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i,ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce guest SHMLBA alignment: round down with SHM_RND, else fail. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the placeholder reservation just found. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((long)host_raddr);
    }
    raddr=h2g((unsigned long)host_raddr);

    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /*
     * Record the mapping so do_shmdt() can clear the page flags.
     * NOTE(review): if every slot is in use the mapping silently goes
     * untracked and a later shmdt will not reset page flags — confirm
     * N_SHM_REGIONS is sized adequately.
     */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;

}
4625 
4626 static inline abi_long do_shmdt(abi_ulong shmaddr)
4627 {
4628     int i;
4629     abi_long rv;
4630 
4631     /* shmdt pointers are always untagged */
4632 
4633     mmap_lock();
4634 
4635     for (i = 0; i < N_SHM_REGIONS; ++i) {
4636         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4637             shm_regions[i].in_use = false;
4638             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4639             break;
4640         }
4641     }
4642     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4643 
4644     mmap_unlock();
4645 
4646     return rv;
4647 }
4648 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    abi_long ret = 0;
    int version = call >> 16;

    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;

    case IPCOP_semtimedop:
        /*
         * The s390 sys_ipc variant has only five parameters instead of six
         * (as for the default variant) and the only difference is the
         * handling of SEMTIMEDOP, where s390 passes the struct timespec
         * pointer in the third parameter while the generic variant uses
         * the fifth.
         */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /*
         * The semun argument to semctl is passed by value, so dereference
         * the ptr argument.
         */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        if (version == 0) {
            /* Old-style callers pass msgp/msgtyp via an in-memory pair. */
            struct target_ipc_kludge {
                abi_long msgp;
                abi_long msgtyp;
            } *kludge;

            if (!lock_user_struct(VERIFY_READ, kludge, ptr, 1)) {
                ret = -TARGET_EFAULT;
                break;
            }
            ret = do_msgrcv(first, tswapal(kludge->msgp), second,
                            tswapal(kludge->msgtyp), third);
            unlock_user_struct(kludge, ptr, 0);
        } else {
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        if (version == 1) {
            ret = -TARGET_EINVAL;
        } else {
            /* The attach address is returned via the pointer in 'third'. */
            abi_ulong raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr)) {
                return get_errno(raddr);
            }
            if (put_user_ual(raddr, third)) {
                return -TARGET_EFAULT;
            }
        }
        break;

    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4769 
/* kernel structure types definitions */

/*
 * First pass over syscall_types.h: generate one STRUCT_<name> enum
 * value per structure description, ending with STRUCT_MAX.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/*
 * Second pass: emit a thunk type-description array struct_<name>_def[]
 * for each regular structure; STRUCT_SPECIAL entries get hand-written
 * converters elsewhere.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed buf_temp buffer used for ioctl argument conversion. */
#define MAX_STRUCT_SIZE 4096
4788 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handle FS_IOC_FIEMAP: convert the guest's struct fiemap (plus its
 * trailing fiemap_extent array) to host format, issue the ioctl, and
 * convert the results back.  Returns the ioctl result or a negative
 * target errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Read only the fixed struct fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts whose extent array size would overflow a uint32_t. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                /* Extents start right after the fixed header in guest memory. */
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4877 
4878 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4879                                 int fd, int cmd, abi_long arg)
4880 {
4881     const argtype *arg_type = ie->arg_type;
4882     int target_size;
4883     void *argptr;
4884     int ret;
4885     struct ifconf *host_ifconf;
4886     uint32_t outbufsz;
4887     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4888     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4889     int target_ifreq_size;
4890     int nb_ifreq;
4891     int free_buf = 0;
4892     int i;
4893     int target_ifc_len;
4894     abi_long target_ifc_buf;
4895     int host_ifc_len;
4896     char *host_ifc_buf;
4897 
4898     assert(arg_type[0] == TYPE_PTR);
4899     assert(ie->access == IOC_RW);
4900 
4901     arg_type++;
4902     target_size = thunk_type_size(arg_type, 0);
4903 
4904     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4905     if (!argptr)
4906         return -TARGET_EFAULT;
4907     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4908     unlock_user(argptr, arg, 0);
4909 
4910     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4911     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4912     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4913 
4914     if (target_ifc_buf != 0) {
4915         target_ifc_len = host_ifconf->ifc_len;
4916         nb_ifreq = target_ifc_len / target_ifreq_size;
4917         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4918 
4919         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4920         if (outbufsz > MAX_STRUCT_SIZE) {
4921             /*
4922              * We can't fit all the extents into the fixed size buffer.
4923              * Allocate one that is large enough and use it instead.
4924              */
4925             host_ifconf = g_try_malloc(outbufsz);
4926             if (!host_ifconf) {
4927                 return -TARGET_ENOMEM;
4928             }
4929             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4930             free_buf = 1;
4931         }
4932         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4933 
4934         host_ifconf->ifc_len = host_ifc_len;
4935     } else {
4936       host_ifc_buf = NULL;
4937     }
4938     host_ifconf->ifc_buf = host_ifc_buf;
4939 
4940     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4941     if (!is_error(ret)) {
4942 	/* convert host ifc_len to target ifc_len */
4943 
4944         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4945         target_ifc_len = nb_ifreq * target_ifreq_size;
4946         host_ifconf->ifc_len = target_ifc_len;
4947 
4948 	/* restore target ifc_buf */
4949 
4950         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4951 
4952 	/* copy struct ifconf to target user */
4953 
4954         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4955         if (!argptr)
4956             return -TARGET_EFAULT;
4957         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4958         unlock_user(argptr, arg, target_size);
4959 
4960         if (target_ifc_buf != 0) {
4961             /* copy ifreq[] to target user */
4962             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4963             for (i = 0; i < nb_ifreq ; i++) {
4964                 thunk_convert(argptr + i * target_ifreq_size,
4965                               host_ifc_buf + i * sizeof(struct ifreq),
4966                               ifreq_arg_type, THUNK_TARGET);
4967             }
4968             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4969         }
4970     }
4971 
4972     if (free_buf) {
4973         g_free(host_ifconf);
4974     }
4975 
4976     return ret;
4977 }
4978 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Bookkeeping for one in-flight usbdevfs URB submitted by the guest.
 * The kernel is handed &host_urb; the surrounding fields let us map
 * the completed host URB back to the guest's urb struct and buffer.
 */
struct live_urb {
    uint64_t target_urb_adr;      /* guest address of the guest's urb struct */
    uint64_t target_buf_adr;      /* guest address of the transfer buffer */
    char *target_buf_ptr;         /* host view of the locked guest buffer */
    struct usbdevfs_urb host_urb; /* host-format urb passed to the kernel */
};
4989 
4990 static GHashTable *usbdevfs_urb_hashtable(void)
4991 {
4992     static GHashTable *urb_hashtable;
4993 
4994     if (!urb_hashtable) {
4995         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4996     }
4997     return urb_hashtable;
4998 }
4999 
/* Register a submitted URB; keyed by its leading target_urb_adr field. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

/* Look up a live URB by the guest address of its urb struct, or NULL. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

/* Drop a URB from the table (does not free it). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
5017 
/*
 * Handle USBDEVFS_REAPURB / REAPURBNDELAY: reap a completed URB from
 * the kernel, copy its results back to the guest's urb struct and
 * buffer, and write the guest URB address through arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host urb pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Recover the containing live_urb from the embedded host_urb pointer. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Unlock copies the transfer buffer contents back to the guest. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5077 
5078 static abi_long
5079 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5080                              uint8_t *buf_temp __attribute__((unused)),
5081                              int fd, int cmd, abi_long arg)
5082 {
5083     struct live_urb *lurb;
5084 
5085     /* map target address back to host URB with metadata. */
5086     lurb = urb_hashtable_lookup(arg);
5087     if (!lurb) {
5088         return -TARGET_EFAULT;
5089     }
5090     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5091 }
5092 
/*
 * Handle USBDEVFS_SUBMITURB: build a host-format URB (plus metadata in
 * struct live_urb), lock the guest transfer buffer for the duration of
 * the request, and submit it.  On success the live_urb stays allocated
 * and registered until reap/discard.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* After conversion, host_urb.buffer still holds the guest address. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: release the buffer lock without copy-back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Ownership of lurb passes to the hashtable until reap/discard. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
5154 
5155 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5156                             int cmd, abi_long arg)
5157 {
5158     void *argptr;
5159     struct dm_ioctl *host_dm;
5160     abi_long guest_data;
5161     uint32_t guest_data_size;
5162     int target_size;
5163     const argtype *arg_type = ie->arg_type;
5164     abi_long ret;
5165     void *big_buf = NULL;
5166     char *host_data;
5167 
5168     arg_type++;
5169     target_size = thunk_type_size(arg_type, 0);
5170     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5171     if (!argptr) {
5172         ret = -TARGET_EFAULT;
5173         goto out;
5174     }
5175     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5176     unlock_user(argptr, arg, 0);
5177 
5178     /* buf_temp is too small, so fetch things into a bigger buffer */
5179     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5180     memcpy(big_buf, buf_temp, target_size);
5181     buf_temp = big_buf;
5182     host_dm = big_buf;
5183 
5184     guest_data = arg + host_dm->data_start;
5185     if ((guest_data - arg) < 0) {
5186         ret = -TARGET_EINVAL;
5187         goto out;
5188     }
5189     guest_data_size = host_dm->data_size - host_dm->data_start;
5190     host_data = (char*)host_dm + host_dm->data_start;
5191 
5192     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5193     if (!argptr) {
5194         ret = -TARGET_EFAULT;
5195         goto out;
5196     }
5197 
5198     switch (ie->host_cmd) {
5199     case DM_REMOVE_ALL:
5200     case DM_LIST_DEVICES:
5201     case DM_DEV_CREATE:
5202     case DM_DEV_REMOVE:
5203     case DM_DEV_SUSPEND:
5204     case DM_DEV_STATUS:
5205     case DM_DEV_WAIT:
5206     case DM_TABLE_STATUS:
5207     case DM_TABLE_CLEAR:
5208     case DM_TABLE_DEPS:
5209     case DM_LIST_VERSIONS:
5210         /* no input data */
5211         break;
5212     case DM_DEV_RENAME:
5213     case DM_DEV_SET_GEOMETRY:
5214         /* data contains only strings */
5215         memcpy(host_data, argptr, guest_data_size);
5216         break;
5217     case DM_TARGET_MSG:
5218         memcpy(host_data, argptr, guest_data_size);
5219         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5220         break;
5221     case DM_TABLE_LOAD:
5222     {
5223         void *gspec = argptr;
5224         void *cur_data = host_data;
5225         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5226         int spec_size = thunk_type_size(arg_type, 0);
5227         int i;
5228 
5229         for (i = 0; i < host_dm->target_count; i++) {
5230             struct dm_target_spec *spec = cur_data;
5231             uint32_t next;
5232             int slen;
5233 
5234             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5235             slen = strlen((char*)gspec + spec_size) + 1;
5236             next = spec->next;
5237             spec->next = sizeof(*spec) + slen;
5238             strcpy((char*)&spec[1], gspec + spec_size);
5239             gspec += next;
5240             cur_data += spec->next;
5241         }
5242         break;
5243     }
5244     default:
5245         ret = -TARGET_EINVAL;
5246         unlock_user(argptr, guest_data, 0);
5247         goto out;
5248     }
5249     unlock_user(argptr, guest_data, 0);
5250 
5251     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5252     if (!is_error(ret)) {
5253         guest_data = arg + host_dm->data_start;
5254         guest_data_size = host_dm->data_size - host_dm->data_start;
5255         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5256         switch (ie->host_cmd) {
5257         case DM_REMOVE_ALL:
5258         case DM_DEV_CREATE:
5259         case DM_DEV_REMOVE:
5260         case DM_DEV_RENAME:
5261         case DM_DEV_SUSPEND:
5262         case DM_DEV_STATUS:
5263         case DM_TABLE_LOAD:
5264         case DM_TABLE_CLEAR:
5265         case DM_TARGET_MSG:
5266         case DM_DEV_SET_GEOMETRY:
5267             /* no return data */
5268             break;
5269         case DM_LIST_DEVICES:
5270         {
5271             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5272             uint32_t remaining_data = guest_data_size;
5273             void *cur_data = argptr;
5274             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5275             int nl_size = 12; /* can't use thunk_size due to alignment */
5276 
5277             while (1) {
5278                 uint32_t next = nl->next;
5279                 if (next) {
5280                     nl->next = nl_size + (strlen(nl->name) + 1);
5281                 }
5282                 if (remaining_data < nl->next) {
5283                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5284                     break;
5285                 }
5286                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5287                 strcpy(cur_data + nl_size, nl->name);
5288                 cur_data += nl->next;
5289                 remaining_data -= nl->next;
5290                 if (!next) {
5291                     break;
5292                 }
5293                 nl = (void*)nl + next;
5294             }
5295             break;
5296         }
5297         case DM_DEV_WAIT:
5298         case DM_TABLE_STATUS:
5299         {
5300             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5301             void *cur_data = argptr;
5302             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5303             int spec_size = thunk_type_size(arg_type, 0);
5304             int i;
5305 
5306             for (i = 0; i < host_dm->target_count; i++) {
5307                 uint32_t next = spec->next;
5308                 int slen = strlen((char*)&spec[1]) + 1;
5309                 spec->next = (cur_data - argptr) + spec_size + slen;
5310                 if (guest_data_size < spec->next) {
5311                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5312                     break;
5313                 }
5314                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5315                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5316                 cur_data = argptr + spec->next;
5317                 spec = (void*)host_dm + host_dm->data_start + next;
5318             }
5319             break;
5320         }
5321         case DM_TABLE_DEPS:
5322         {
5323             void *hdata = (void*)host_dm + host_dm->data_start;
5324             int count = *(uint32_t*)hdata;
5325             uint64_t *hdev = hdata + 8;
5326             uint64_t *gdev = argptr + 8;
5327             int i;
5328 
5329             *(uint32_t*)argptr = tswap32(count);
5330             for (i = 0; i < count; i++) {
5331                 *gdev = tswap64(*hdev);
5332                 gdev++;
5333                 hdev++;
5334             }
5335             break;
5336         }
5337         case DM_LIST_VERSIONS:
5338         {
5339             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5340             uint32_t remaining_data = guest_data_size;
5341             void *cur_data = argptr;
5342             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5343             int vers_size = thunk_type_size(arg_type, 0);
5344 
5345             while (1) {
5346                 uint32_t next = vers->next;
5347                 if (next) {
5348                     vers->next = vers_size + (strlen(vers->name) + 1);
5349                 }
5350                 if (remaining_data < vers->next) {
5351                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5352                     break;
5353                 }
5354                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5355                 strcpy(cur_data + vers_size, vers->name);
5356                 cur_data += vers->next;
5357                 remaining_data -= vers->next;
5358                 if (!next) {
5359                     break;
5360                 }
5361                 vers = (void*)vers + next;
5362             }
5363             break;
5364         }
5365         default:
5366             unlock_user(argptr, guest_data, 0);
5367             ret = -TARGET_EINVAL;
5368             goto out;
5369         }
5370         unlock_user(argptr, guest_data, guest_data_size);
5371 
5372         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5373         if (!argptr) {
5374             ret = -TARGET_EFAULT;
5375             goto out;
5376         }
5377         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5378         unlock_user(argptr, arg, target_size);
5379     }
5380 out:
5381     g_free(big_buf);
5382     return ret;
5383 }
5384 
/*
 * BLKPG: the guest's struct blkpg_ioctl_arg contains a 'data' member
 * that is itself a guest pointer to a struct blkpg_partition, which the
 * generic thunk machinery cannot follow.  Convert both levels by hand
 * and swizzle 'data' to a host-side copy before issuing the host ioctl.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert the outer blkpg_ioctl_arg from guest memory */
    arg_type++; /* skip TYPE_PTR so arg_type describes the struct itself */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (still a guest address at this point) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5437 
/*
 * Routing-table ioctls (struct rtentry argument): the rt_dev member is
 * a string pointer the generic thunk cannot convert, so every field is
 * converted individually and the guest rt_dev string is pinned with
 * lock_user_string for the duration of the host ioctl.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Pin the guest device-name string; stays locked across
                 * the ioctl and is released after it returns. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must have seen the rt_dev field exactly once. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5503 
5504 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5505                                      int fd, int cmd, abi_long arg)
5506 {
5507     int sig = target_to_host_signal(arg);
5508     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5509 }
5510 
5511 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5512                                     int fd, int cmd, abi_long arg)
5513 {
5514     struct timeval tv;
5515     abi_long ret;
5516 
5517     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5518     if (is_error(ret)) {
5519         return ret;
5520     }
5521 
5522     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5523         if (copy_to_user_timeval(arg, &tv)) {
5524             return -TARGET_EFAULT;
5525         }
5526     } else {
5527         if (copy_to_user_timeval64(arg, &tv)) {
5528             return -TARGET_EFAULT;
5529         }
5530     }
5531 
5532     return ret;
5533 }
5534 
5535 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5536                                       int fd, int cmd, abi_long arg)
5537 {
5538     struct timespec ts;
5539     abi_long ret;
5540 
5541     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5542     if (is_error(ret)) {
5543         return ret;
5544     }
5545 
5546     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5547         if (host_to_target_timespec(arg, &ts)) {
5548             return -TARGET_EFAULT;
5549         }
5550     } else{
5551         if (host_to_target_timespec64(arg, &ts)) {
5552             return -TARGET_EFAULT;
5553         }
5554     }
5555 
5556     return ret;
5557 }
5558 
5559 #ifdef TIOCGPTPEER
5560 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5561                                      int fd, int cmd, abi_long arg)
5562 {
5563     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5564     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5565 }
5566 #endif
5567 
5568 #ifdef HAVE_DRM_H
5569 
/*
 * Release the guest name/date/desc buffers pinned by
 * target_to_host_drmversion().  When 'copy' is true, up to the
 * host-reported *_len bytes are written back to the guest; when false
 * the buffers are discarded unchanged (error path).
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                                copy ? host_ver->desc_len : 0);
}
5581 
/*
 * Prepare a host struct drm_version for DRM_IOCTL_VERSION: pin the
 * guest-supplied name/date/desc buffers (if their lengths are nonzero)
 * and point the host struct at them.  Returns 0 on success or -EFAULT
 * with all partially-pinned buffers released.
 *
 * NOTE(review): the buffer addresses and the lengths passed to
 * lock_user are read straight from target_ver without byte-swapping
 * (only the *_len copies into host_ver go through __get_user) —
 * presumably fine for same-endian hosts; confirm for cross-endian
 * targets.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Release whatever was pinned so far, without copying back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5619 
/*
 * Copy the scalar results of a successful DRM_IOCTL_VERSION back to the
 * guest struct and release the pinned string buffers, writing their
 * host-filled contents back to guest memory.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5632 
5633 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5634                              int fd, int cmd, abi_long arg)
5635 {
5636     struct drm_version *ver;
5637     struct target_drm_version *target_ver;
5638     abi_long ret;
5639 
5640     switch (ie->host_cmd) {
5641     case DRM_IOCTL_VERSION:
5642         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5643             return -TARGET_EFAULT;
5644         }
5645         ver = (struct drm_version *)buf_temp;
5646         ret = target_to_host_drmversion(ver, target_ver);
5647         if (!is_error(ret)) {
5648             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5649             if (is_error(ret)) {
5650                 unlock_drm_version(ver, target_ver, false);
5651             } else {
5652                 host_to_target_drmversion(target_ver, ver);
5653             }
5654         }
5655         unlock_user_struct(target_ver, arg, 0);
5656         return ret;
5657     }
5658     return -TARGET_ENOSYS;
5659 }
5660 
5661 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5662                                            struct drm_i915_getparam *gparam,
5663                                            int fd, abi_long arg)
5664 {
5665     abi_long ret;
5666     int value;
5667     struct target_drm_i915_getparam *target_gparam;
5668 
5669     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5670         return -TARGET_EFAULT;
5671     }
5672 
5673     __get_user(gparam->param, &target_gparam->param);
5674     gparam->value = &value;
5675     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5676     put_user_s32(value, target_gparam->value);
5677 
5678     unlock_user_struct(target_gparam, arg, 0);
5679     return ret;
5680 }
5681 
5682 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5683                                   int fd, int cmd, abi_long arg)
5684 {
5685     switch (ie->host_cmd) {
5686     case DRM_IOCTL_I915_GETPARAM:
5687         return do_ioctl_drm_i915_getparam(ie,
5688                                           (struct drm_i915_getparam *)buf_temp,
5689                                           fd, arg);
5690     default:
5691         return -TARGET_ENOSYS;
5692     }
5693 }
5694 
5695 #endif
5696 
5697 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5698                                         int fd, int cmd, abi_long arg)
5699 {
5700     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5701     struct tun_filter *target_filter;
5702     char *target_addr;
5703 
5704     assert(ie->access == IOC_W);
5705 
5706     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5707     if (!target_filter) {
5708         return -TARGET_EFAULT;
5709     }
5710     filter->flags = tswap16(target_filter->flags);
5711     filter->count = tswap16(target_filter->count);
5712     unlock_user(target_filter, arg, 0);
5713 
5714     if (filter->count) {
5715         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5716             MAX_STRUCT_SIZE) {
5717             return -TARGET_EFAULT;
5718         }
5719 
5720         target_addr = lock_user(VERIFY_READ,
5721                                 arg + offsetof(struct tun_filter, addr),
5722                                 filter->count * ETH_ALEN, 1);
5723         if (!target_addr) {
5724             return -TARGET_EFAULT;
5725         }
5726         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5727         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5728     }
5729 
5730     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5731 }
5732 
/*
 * Master table of supported ioctls, generated from ioctls.h.  IOCTL()
 * entries use generic thunk-based argument conversion, IOCTL_SPECIAL()
 * entries name a dedicated handler, and IOCTL_IGNORE() entries are
 * recognised target commands with no host equivalent (host_cmd == 0).
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* sentinel: target_cmd == 0 terminates the lookup */
};
5743 
/* ??? Implement proper locking for ioctls.  */
/*
 * Central ioctl dispatcher: look the target command up in
 * ioctl_entries; special entries run their do_ioctl callback, the rest
 * have their argument converted automatically from the thunk type
 * description according to the entry's access mode.
 * Must return target values and target errnos.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table is terminated by target_cmd == 0. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* scalar argument: passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* read-only for the guest: ioctl first, then copy out */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* write-only for the guest: copy in, then ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        /* unknown access modes deliberately fall through to read-write */
        case IOC_RW:
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
5835 
5836 static const bitmask_transtbl iflag_tbl[] = {
5837         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5838         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5839         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5840         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5841         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5842         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5843         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5844         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5845         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5846         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5847         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5848         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5849         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5850         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5851         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5852         { 0, 0, 0, 0 }
5853 };
5854 
5855 static const bitmask_transtbl oflag_tbl[] = {
5856 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5857 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5858 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5859 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5860 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5861 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5862 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5863 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5864 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5865 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5866 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5867 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5868 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5869 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5870 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5871 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5872 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5873 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5874 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5875 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5876 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5877 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5878 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5879 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5880 	{ 0, 0, 0, 0 }
5881 };
5882 
5883 static const bitmask_transtbl cflag_tbl[] = {
5884 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5885 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5886 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5887 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5888 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5889 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5890 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5891 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5892 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5893 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5894 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5895 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5896 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5897 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5898 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5899 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5900 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5901 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5902 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5903 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5904 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5905 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5906 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5907 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5908 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5909 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5910 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5911 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5912 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5913 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5914 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5915 	{ 0, 0, 0, 0 }
5916 };
5917 
5918 static const bitmask_transtbl lflag_tbl[] = {
5919   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5920   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5921   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5922   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5923   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5924   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5925   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5926   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5927   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5928   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5929   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5930   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5931   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5932   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5933   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5934   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5935   { 0, 0, 0, 0 }
5936 };
5937 
/*
 * Convert a guest struct target_termios to the host representation:
 * the four flag words are byte-swapped and translated through their
 * bitmask tables, and the control characters are copied one by one
 * because the TARGET_V* indices differ from the host V* indices.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Clear first: host slots with no target equivalent stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5972 
/*
 * Convert a host struct host_termios to the guest representation:
 * inverse of target_to_host_termios() — flag words go through the
 * bitmask tables and are then byte-swapped, and the control
 * characters are remapped index by index.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Clear first: target slots with no host equivalent stay zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
6007 
/*
 * Thunk descriptor for termios: conversion in both directions is done
 * by the custom routines above rather than by generic field-by-field
 * conversion, since the c_cc index mapping cannot be expressed in a
 * thunk type description.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
6014 
/*
 * mmap(2) flag translation: maps TARGET_MAP_* bits onto host MAP_*
 * bits.  Entries are {target_mask, target_bits, host_mask, host_bits}.
 */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6037 
6038 /*
6039  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6040  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6041  */
6042 #if defined(TARGET_I386)
6043 
/* NOTE: there is really one LDT for all the threads */
/* Host view of the emulated LDT backing store; allocated lazily by
   write_ldt() in guest memory (see env->ldt.base).  NULL until then. */
static uint8_t *ldt_table;
6046 
/*
 * modify_ldt(2), func == 0: copy up to @bytecount bytes of the emulated
 * LDT to guest address @ptr.  Returns the number of bytes copied, 0 if
 * no LDT has been allocated yet, or -TARGET_EFAULT on a bad pointer.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
6065 
/* XXX: add locking support */
/*
 * modify_ldt(2), func == 1 (oldmode) or 0x11: read a
 * target_modify_ldt_ldt_s descriptor from guest address @ptr and
 * install it in the emulated LDT, allocating the LDT backing store on
 * first use.  The descriptor-to-entry packing mirrors the Linux
 * kernel's fill_ldt()/LDT_empty() logic.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (bit layout matches the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 would be a 64-bit code segment: old mode rejects it,
       and it must be marked not-present.  */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of an x86
       segment descriptor (0x7000 = DPL 3, present handled below).  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6157 
6158 /* specific and weird i386 syscalls */
6159 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6160                               unsigned long bytecount)
6161 {
6162     abi_long ret;
6163 
6164     switch (func) {
6165     case 0:
6166         ret = read_ldt(ptr, bytecount);
6167         break;
6168     case 1:
6169         ret = write_ldt(env, ptr, bytecount, 1);
6170         break;
6171     case 0x11:
6172         ret = write_ldt(env, ptr, bytecount, 0);
6173         break;
6174     default:
6175         ret = -TARGET_ENOSYS;
6176         break;
6177     }
6178     return ret;
6179 }
6180 
6181 #if defined(TARGET_ABI32)
/*
 * set_thread_area(2): install a TLS descriptor in the guest GDT.
 * If the guest passes entry_number == -1, pick the first free TLS slot
 * and write the chosen index back to the guest struct, as the kernel
 * does.  Packing mirrors write_ldt() above.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS entry for me".  */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (bit layout matches the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two descriptor halves.  */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6266 
/*
 * get_thread_area(2): read back a TLS descriptor from the guest GDT and
 * unpack it into the guest's target_modify_ldt_ldt_s (the inverse of
 * do_set_thread_area()'s packing).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Extract the individual descriptor bits back out of entry_2.  */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6313 
/* arch_prctl(2) does not exist for 32-bit x86 guests.  */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6318 #else
6319 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6320 {
6321     abi_long ret = 0;
6322     abi_ulong val;
6323     int idx;
6324 
6325     switch(code) {
6326     case TARGET_ARCH_SET_GS:
6327     case TARGET_ARCH_SET_FS:
6328         if (code == TARGET_ARCH_SET_GS)
6329             idx = R_GS;
6330         else
6331             idx = R_FS;
6332         cpu_x86_load_seg(env, idx, 0);
6333         env->segs[idx].base = addr;
6334         break;
6335     case TARGET_ARCH_GET_GS:
6336     case TARGET_ARCH_GET_FS:
6337         if (code == TARGET_ARCH_GET_GS)
6338             idx = R_GS;
6339         else
6340             idx = R_FS;
6341         val = env->segs[idx].base;
6342         if (put_user(val, addr, abi_ulong))
6343             ret = -TARGET_EFAULT;
6344         break;
6345     default:
6346         ret = -TARGET_EINVAL;
6347         break;
6348     }
6349     return ret;
6350 }
#endif /* defined(TARGET_ABI32) */
6352 #endif /* defined(TARGET_I386) */
6353 
6354 /*
6355  * These constants are generic.  Supply any that are missing from the host.
6356  */
6357 #ifndef PR_SET_NAME
6358 # define PR_SET_NAME    15
6359 # define PR_GET_NAME    16
6360 #endif
6361 #ifndef PR_SET_FP_MODE
6362 # define PR_SET_FP_MODE 45
6363 # define PR_GET_FP_MODE 46
6364 # define PR_FP_MODE_FR   (1 << 0)
6365 # define PR_FP_MODE_FRE  (1 << 1)
6366 #endif
6367 #ifndef PR_SVE_SET_VL
6368 # define PR_SVE_SET_VL  50
6369 # define PR_SVE_GET_VL  51
6370 # define PR_SVE_VL_LEN_MASK  0xffff
6371 # define PR_SVE_VL_INHERIT   (1 << 17)
6372 #endif
6373 #ifndef PR_PAC_RESET_KEYS
6374 # define PR_PAC_RESET_KEYS  54
6375 # define PR_PAC_APIAKEY   (1 << 0)
6376 # define PR_PAC_APIBKEY   (1 << 1)
6377 # define PR_PAC_APDAKEY   (1 << 2)
6378 # define PR_PAC_APDBKEY   (1 << 3)
6379 # define PR_PAC_APGAKEY   (1 << 4)
6380 #endif
6381 #ifndef PR_SET_TAGGED_ADDR_CTRL
6382 # define PR_SET_TAGGED_ADDR_CTRL 55
6383 # define PR_GET_TAGGED_ADDR_CTRL 56
6384 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6385 #endif
6386 #ifndef PR_MTE_TCF_SHIFT
6387 # define PR_MTE_TCF_SHIFT       1
6388 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6389 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6390 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6391 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TAG_SHIFT       3
6393 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6394 #endif
6395 #ifndef PR_SET_IO_FLUSHER
6396 # define PR_SET_IO_FLUSHER 57
6397 # define PR_GET_IO_FLUSHER 58
6398 #endif
6399 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6400 # define PR_SET_SYSCALL_USER_DISPATCH 59
6401 #endif
6402 #ifndef PR_SME_SET_VL
6403 # define PR_SME_SET_VL  63
6404 # define PR_SME_GET_VL  64
6405 # define PR_SME_VL_LEN_MASK  0xffff
6406 # define PR_SME_VL_INHERIT   (1 << 17)
6407 #endif
6408 
6409 #include "target_prctl.h"
6410 
/* Fallback for per-target prctl hooks (no argument): not supported.  */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6415 
/* Fallback for per-target prctl hooks (one argument): not supported.  */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6420 
6421 #ifndef do_prctl_get_fp_mode
6422 #define do_prctl_get_fp_mode do_prctl_inval0
6423 #endif
6424 #ifndef do_prctl_set_fp_mode
6425 #define do_prctl_set_fp_mode do_prctl_inval1
6426 #endif
6427 #ifndef do_prctl_sve_get_vl
6428 #define do_prctl_sve_get_vl do_prctl_inval0
6429 #endif
6430 #ifndef do_prctl_sve_set_vl
6431 #define do_prctl_sve_set_vl do_prctl_inval1
6432 #endif
6433 #ifndef do_prctl_reset_keys
6434 #define do_prctl_reset_keys do_prctl_inval1
6435 #endif
6436 #ifndef do_prctl_set_tagged_addr_ctrl
6437 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6438 #endif
6439 #ifndef do_prctl_get_tagged_addr_ctrl
6440 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6441 #endif
6442 #ifndef do_prctl_get_unalign
6443 #define do_prctl_get_unalign do_prctl_inval1
6444 #endif
6445 #ifndef do_prctl_set_unalign
6446 #define do_prctl_set_unalign do_prctl_inval1
6447 #endif
6448 #ifndef do_prctl_sme_get_vl
6449 #define do_prctl_sme_get_vl do_prctl_inval0
6450 #endif
6451 #ifndef do_prctl_sme_set_vl
6452 #define do_prctl_sme_set_vl do_prctl_inval1
6453 #endif
6454 
/*
 * Emulate prctl(2).  Options fall into four groups: those needing
 * argument translation (signals, guest pointers), those delegated to
 * per-target hooks (do_prctl_* from target_prctl.h), pointer-free
 * options passed straight to the host, and options we deliberately
 * refuse because honouring them could break the emulation itself.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            /* Translate the host signal number before handing it back. */
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The kernel contract is a 16-byte buffer including NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6586 
6587 #define NEW_STACK_SIZE 0x40000
6588 
6589 
/* Held by do_fork() across thread setup so the child (clone_func) does
   not enter cpu_loop() until the parent has finished initializing. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the new thread's clone_func();
   lives on the parent's stack for the duration of the handshake. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;
    abi_ulong child_tidptr;
    abi_ulong parent_tidptr;
    sigset_t sigmask;
} new_thread_info;
6601 
/*
 * Entry point of a thread created by do_fork() with CLONE_VM: register
 * with RCU/TCG, publish the host TID where CLONE_*_SETTID asked for it,
 * signal the parent that setup is done, then wait for clone_lock before
 * entering the guest cpu_loop().  Never returns.
 * NOTE(review): @arg points at a new_thread_info on the parent's stack;
 * it must not be touched after the cond broadcast below.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6635 
6636 /* do_fork() Must return host values and target errnos (unlike most
6637    do_*() functions). */
6638 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6639                    abi_ulong parent_tidptr, target_ulong newtls,
6640                    abi_ulong child_tidptr)
6641 {
6642     CPUState *cpu = env_cpu(env);
6643     int ret;
6644     TaskState *ts;
6645     CPUState *new_cpu;
6646     CPUArchState *new_env;
6647     sigset_t sigmask;
6648 
6649     flags &= ~CLONE_IGNORED_FLAGS;
6650 
6651     /* Emulate vfork() with fork() */
6652     if (flags & CLONE_VFORK)
6653         flags &= ~(CLONE_VFORK | CLONE_VM);
6654 
6655     if (flags & CLONE_VM) {
6656         TaskState *parent_ts = (TaskState *)cpu->opaque;
6657         new_thread_info info;
6658         pthread_attr_t attr;
6659 
6660         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6661             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6662             return -TARGET_EINVAL;
6663         }
6664 
6665         ts = g_new0(TaskState, 1);
6666         init_task_state(ts);
6667 
6668         /* Grab a mutex so that thread setup appears atomic.  */
6669         pthread_mutex_lock(&clone_lock);
6670 
6671         /*
6672          * If this is our first additional thread, we need to ensure we
6673          * generate code for parallel execution and flush old translations.
6674          * Do this now so that the copy gets CF_PARALLEL too.
6675          */
6676         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6677             cpu->tcg_cflags |= CF_PARALLEL;
6678             tb_flush(cpu);
6679         }
6680 
6681         /* we create a new CPU instance. */
6682         new_env = cpu_copy(env);
6683         /* Init regs that differ from the parent.  */
6684         cpu_clone_regs_child(new_env, newsp, flags);
6685         cpu_clone_regs_parent(env, flags);
6686         new_cpu = env_cpu(new_env);
6687         new_cpu->opaque = ts;
6688         ts->bprm = parent_ts->bprm;
6689         ts->info = parent_ts->info;
6690         ts->signal_mask = parent_ts->signal_mask;
6691 
6692         if (flags & CLONE_CHILD_CLEARTID) {
6693             ts->child_tidptr = child_tidptr;
6694         }
6695 
6696         if (flags & CLONE_SETTLS) {
6697             cpu_set_tls (new_env, newtls);
6698         }
6699 
6700         memset(&info, 0, sizeof(info));
6701         pthread_mutex_init(&info.mutex, NULL);
6702         pthread_mutex_lock(&info.mutex);
6703         pthread_cond_init(&info.cond, NULL);
6704         info.env = new_env;
6705         if (flags & CLONE_CHILD_SETTID) {
6706             info.child_tidptr = child_tidptr;
6707         }
6708         if (flags & CLONE_PARENT_SETTID) {
6709             info.parent_tidptr = parent_tidptr;
6710         }
6711 
6712         ret = pthread_attr_init(&attr);
6713         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6714         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6715         /* It is not safe to deliver signals until the child has finished
6716            initializing, so temporarily block all signals.  */
6717         sigfillset(&sigmask);
6718         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6719         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6720 
6721         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6722         /* TODO: Free new CPU state if thread creation failed.  */
6723 
6724         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6725         pthread_attr_destroy(&attr);
6726         if (ret == 0) {
6727             /* Wait for the child to initialize.  */
6728             pthread_cond_wait(&info.cond, &info.mutex);
6729             ret = info.tid;
6730         } else {
6731             ret = -1;
6732         }
6733         pthread_mutex_unlock(&info.mutex);
6734         pthread_cond_destroy(&info.cond);
6735         pthread_mutex_destroy(&info.mutex);
6736         pthread_mutex_unlock(&clone_lock);
6737     } else {
6738         /* if no CLONE_VM, we consider it is a fork */
6739         if (flags & CLONE_INVALID_FORK_FLAGS) {
6740             return -TARGET_EINVAL;
6741         }
6742 
6743         /* We can't support custom termination signals */
6744         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6745             return -TARGET_EINVAL;
6746         }
6747 
6748 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6749         if (flags & CLONE_PIDFD) {
6750             return -TARGET_EINVAL;
6751         }
6752 #endif
6753 
6754         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6755         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6756             return -TARGET_EINVAL;
6757         }
6758 
6759         if (block_signals()) {
6760             return -QEMU_ERESTARTSYS;
6761         }
6762 
6763         fork_start();
6764         ret = fork();
6765         if (ret == 0) {
6766             /* Child Process.  */
6767             cpu_clone_regs_child(env, newsp, flags);
6768             fork_end(1);
6769             /* There is a race condition here.  The parent process could
6770                theoretically read the TID in the child process before the child
6771                tid is set.  This would require using either ptrace
6772                (not implemented) or having *_tidptr to point at a shared memory
6773                mapping.  We can't repeat the spinlock hack used above because
6774                the child process gets its own copy of the lock.  */
6775             if (flags & CLONE_CHILD_SETTID)
6776                 put_user_u32(sys_gettid(), child_tidptr);
6777             if (flags & CLONE_PARENT_SETTID)
6778                 put_user_u32(sys_gettid(), parent_tidptr);
6779             ts = (TaskState *)cpu->opaque;
6780             if (flags & CLONE_SETTLS)
6781                 cpu_set_tls (env, newtls);
6782             if (flags & CLONE_CHILD_CLEARTID)
6783                 ts->child_tidptr = child_tidptr;
6784         } else {
6785             cpu_clone_regs_parent(env, flags);
6786             if (flags & CLONE_PIDFD) {
6787                 int pid_fd = 0;
6788 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6789                 int pid_child = ret;
6790                 pid_fd = pidfd_open(pid_child, 0);
6791                 if (pid_fd >= 0) {
6792                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6793                                                | FD_CLOEXEC);
6794                 } else {
6795                         pid_fd = 0;
6796                 }
6797 #endif
6798                 put_user_u32(pid_fd, parent_tidptr);
6799                 }
6800             fork_end(0);
6801         }
6802         g_assert(!cpu_in_exclusive_context(cpu));
6803     }
6804     return ret;
6805 }
6806 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command number to the host's, mapping the
 * non-64-bit lock commands onto the host's 64-bit ones.  Returns
 * -TARGET_EINVAL for commands we do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6913 
/*
 * X-macro switch over the flock lock types; TRANSTBL_CONVERT is defined
 * differently below to build each direction of the conversion.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6920 
/* Convert a target flock l_type to the host value, or -TARGET_EINVAL. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6928 
/* Convert a host flock l_type to the target value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6939 
6940 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6941                                             abi_ulong target_flock_addr)
6942 {
6943     struct target_flock *target_fl;
6944     int l_type;
6945 
6946     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6947         return -TARGET_EFAULT;
6948     }
6949 
6950     __get_user(l_type, &target_fl->l_type);
6951     l_type = target_to_host_flock(l_type);
6952     if (l_type < 0) {
6953         return l_type;
6954     }
6955     fl->l_type = l_type;
6956     __get_user(fl->l_whence, &target_fl->l_whence);
6957     __get_user(fl->l_start, &target_fl->l_start);
6958     __get_user(fl->l_len, &target_fl->l_len);
6959     __get_user(fl->l_pid, &target_fl->l_pid);
6960     unlock_user_struct(target_fl, target_flock_addr, 0);
6961     return 0;
6962 }
6963 
/*
 * Write the host struct flock64 *@fl back to a guest struct
 * target_flock at @target_flock_addr.
 * Returns 0 on success or -TARGET_EFAULT on an unwritable address.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6983 
6984 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
6985 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6986 
6987 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Old-ABI (OABI) ARM layout of flock64; packed, so there is no
 * padding between the 16-bit fields and the 64-bit l_start. */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
6995 
6996 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6997                                                    abi_ulong target_flock_addr)
6998 {
6999     struct target_oabi_flock64 *target_fl;
7000     int l_type;
7001 
7002     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7003         return -TARGET_EFAULT;
7004     }
7005 
7006     __get_user(l_type, &target_fl->l_type);
7007     l_type = target_to_host_flock(l_type);
7008     if (l_type < 0) {
7009         return l_type;
7010     }
7011     fl->l_type = l_type;
7012     __get_user(fl->l_whence, &target_fl->l_whence);
7013     __get_user(fl->l_start, &target_fl->l_start);
7014     __get_user(fl->l_len, &target_fl->l_len);
7015     __get_user(fl->l_pid, &target_fl->l_pid);
7016     unlock_user_struct(target_fl, target_flock_addr, 0);
7017     return 0;
7018 }
7019 
/*
 * Write the host struct flock64 *@fl back to a guest OABI
 * struct flock64 at @target_flock_addr.
 * Returns 0 on success or -TARGET_EFAULT on an unwritable address.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
7039 #endif
7040 
7041 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7042                                               abi_ulong target_flock_addr)
7043 {
7044     struct target_flock64 *target_fl;
7045     int l_type;
7046 
7047     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7048         return -TARGET_EFAULT;
7049     }
7050 
7051     __get_user(l_type, &target_fl->l_type);
7052     l_type = target_to_host_flock(l_type);
7053     if (l_type < 0) {
7054         return l_type;
7055     }
7056     fl->l_type = l_type;
7057     __get_user(fl->l_whence, &target_fl->l_whence);
7058     __get_user(fl->l_start, &target_fl->l_start);
7059     __get_user(fl->l_len, &target_fl->l_len);
7060     __get_user(fl->l_pid, &target_fl->l_pid);
7061     unlock_user_struct(target_fl, target_flock_addr, 0);
7062     return 0;
7063 }
7064 
/*
 * Write the host struct flock64 *@fl back to a guest struct
 * target_flock64 at @target_flock_addr.
 * Returns 0 on success or -TARGET_EFAULT on an unwritable address.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
7084 
/*
 * Implement fcntl(2) for the guest: translate the command number,
 * flock structures, flag bitmasks and signal numbers between target
 * and host representations, then perform the host fcntl.
 * Returns the converted host result or a negative TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK writes the conflicting-lock description back */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        /* flag bits must be converted to their host encoding */
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* the argument is a signal number and needs translating */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* plain integer argument, passed through unchanged */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* unknown command: pass the original cmd through untranslated */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7207 
7208 #ifdef USE_UID16
7209 
/* Narrow a uid to the 16-bit range: values that do not fit are
 * reported as the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
7217 
/* Narrow a gid to the 16-bit range: values that do not fit are
 * reported as the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
7225 
/* Widen a 16-bit uid: the legacy -1 sentinel (0xffff) must remain -1
 * so "leave unchanged" keeps its meaning with 32-bit ids. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
7233 
/* Widen a 16-bit gid: the legacy -1 sentinel (0xffff) must remain -1
 * so "leave unchanged" keeps its meaning with 32-bit ids. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a 16-bit uid/gid to guest memory. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7247 
7248 #else /* !USE_UID16 */
/* With 32-bit uids/gids no narrowing or widening is needed; these are
 * identity functions kept so callers are ifdef-free. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a 32-bit uid/gid to guest memory. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7271 
7272 #endif /* USE_UID16 */
7273 
7274 /* We must do direct syscalls for setting UID/GID, because we want to
7275  * implement the Linux system call semantics of "change only for this thread",
7276  * not the libc/POSIX semantics of "change for all threads in process".
7277  * (See http://ewontfix.com/17/ for more details.)
7278  * We use the 32-bit version of the syscalls if present; if it is not
7279  * then either the host architecture supports 32-bit UIDs natively with
7280  * the standard syscall, or the 16-bit UID is the best we can do.
7281  */
/* Prefer the explicit 32-bit-uid syscall numbers where the host
 * kernel provides them; otherwise fall back to the plain variants. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Direct syscall wrappers, bypassing libc to get the kernel's
 * per-thread credential semantics (see the block comment above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7307 
/*
 * One-time initialisation for the syscall layer: register all thunk
 * struct descriptions and fix up the size field of ioctl commands
 * whose size could not be computed at compile time.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

/* Register every struct listed in syscall_types.h with the thunk layer. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* the size can only be derived from a pointed-to type */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7352 
7353 #ifdef TARGET_NR_truncate64
/*
 * truncate64: on ABIs that require 64-bit syscall arguments in even
 * register pairs, the offset halves arrive one slot later, so take
 * them from arg3/arg4 instead of arg2/arg3 before reassembling.
 */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
7365 #endif
7366 
7367 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: same register-pair alignment handling as
 * target_truncate64() above, with a descriptor instead of a path.
 */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
7379 #endif
7380 
7381 #if defined(TARGET_NR_timer_settime) || \
7382     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct target_itimerspec at @target_addr into
 * *host_its.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7397 #endif
7398 
7399 #if defined(TARGET_NR_timer_settime64) || \
7400     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/*
 * 64-bit time_t variant: read a guest struct target__kernel_itimerspec
 * at @target_addr into *host_its.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7415 #endif
7416 
7417 #if ((defined(TARGET_NR_timerfd_gettime) || \
7418       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7419       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/*
 * Write *host_its to a guest struct target_itimerspec at
 * @target_addr.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7433 #endif
7434 
7435 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7436       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7437       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/*
 * 64-bit time_t variant: write *host_its to a guest struct
 * target__kernel_itimerspec at @target_addr.  Returns 0 or
 * -TARGET_EFAULT.
 */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7453 #endif
7454 
7455 #if defined(TARGET_NR_adjtimex) || \
7456     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a guest struct target_timex at @target_addr into *host_tx,
 * field by field (including the embedded timeval).
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7491 
/*
 * Write *host_tx back to a guest struct target_timex at @target_addr,
 * field by field (including the embedded timeval).
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7526 #endif
7527 
7528 
7529 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct target__kernel_timex at @target_addr into
 * *host_tx.  Unlike the 32-bit variant, the embedded time value is
 * converted through copy_from_user_timeval64().
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7568 
7569 static inline abi_long host_to_target_timex64(abi_long target_addr,
7570                                               struct timex *host_tx)
7571 {
7572     struct target__kernel_timex *target_tx;
7573 
7574    if (copy_to_user_timeval64(target_addr +
7575                               offsetof(struct target__kernel_timex, time),
7576                               &host_tx->time)) {
7577         return -TARGET_EFAULT;
7578     }
7579 
7580     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7581         return -TARGET_EFAULT;
7582     }
7583 
7584     __put_user(host_tx->modes, &target_tx->modes);
7585     __put_user(host_tx->offset, &target_tx->offset);
7586     __put_user(host_tx->freq, &target_tx->freq);
7587     __put_user(host_tx->maxerror, &target_tx->maxerror);
7588     __put_user(host_tx->esterror, &target_tx->esterror);
7589     __put_user(host_tx->status, &target_tx->status);
7590     __put_user(host_tx->constant, &target_tx->constant);
7591     __put_user(host_tx->precision, &target_tx->precision);
7592     __put_user(host_tx->tolerance, &target_tx->tolerance);
7593     __put_user(host_tx->tick, &target_tx->tick);
7594     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7595     __put_user(host_tx->jitter, &target_tx->jitter);
7596     __put_user(host_tx->shift, &target_tx->shift);
7597     __put_user(host_tx->stabil, &target_tx->stabil);
7598     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7599     __put_user(host_tx->calcnt, &target_tx->calcnt);
7600     __put_user(host_tx->errcnt, &target_tx->errcnt);
7601     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7602     __put_user(host_tx->tai, &target_tx->tai);
7603 
7604     unlock_user_struct(target_tx, target_addr, 1);
7605     return 0;
7606 }
7607 #endif
7608 
7609 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7610 #define sigev_notify_thread_id _sigev_un._tid
7611 #endif
7612 
7613 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7614                                                abi_ulong target_addr)
7615 {
7616     struct target_sigevent *target_sevp;
7617 
7618     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7619         return -TARGET_EFAULT;
7620     }
7621 
7622     /* This union is awkward on 64 bit systems because it has a 32 bit
7623      * integer and a pointer in it; we follow the conversion approach
7624      * used for handling sigval types in signal.c so the guest should get
7625      * the correct value back even if we did a 64 bit byteswap and it's
7626      * using the 32 bit integer.
7627      */
7628     host_sevp->sigev_value.sival_ptr =
7629         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7630     host_sevp->sigev_signo =
7631         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7632     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7633     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7634 
7635     unlock_user_struct(target_sevp, target_addr, 1);
7636     return 0;
7637 }
7638 
7639 #if defined(TARGET_NR_mlockall)
7640 static inline int target_to_host_mlockall_arg(int arg)
7641 {
7642     int result = 0;
7643 
7644     if (arg & TARGET_MCL_CURRENT) {
7645         result |= MCL_CURRENT;
7646     }
7647     if (arg & TARGET_MCL_FUTURE) {
7648         result |= MCL_FUTURE;
7649     }
7650 #ifdef MCL_ONFAULT
7651     if (arg & TARGET_MCL_ONFAULT) {
7652         result |= MCL_ONFAULT;
7653     }
7654 #endif
7655 
7656     return result;
7657 }
7658 #endif
7659 
7660 static inline int target_to_host_msync_arg(abi_long arg)
7661 {
7662     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7663            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7664            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7665            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7666 }
7667 
7668 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7669      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7670      defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat to the guest's 64-bit stat structure at
 * @target_addr, choosing the layout the guest expects (ARM EABI,
 * target_stat64, or plain target_stat).
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        /* ARM EABI guests use their own stat64 layout */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* some layouts carry the inode in a second, truncated field too */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7743 #endif
7744 
7745 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result *host_stx to the guest struct target_statx at
 * @target_addr, byte-swapping each field; unset fields are zeroed.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7784 #endif
7785 
/*
 * Invoke the raw host futex syscall, picking the variant that matches
 * the width of the host libc's time_t.
 *
 * 64-bit hosts always use __NR_futex (its timespec already has a 64-bit
 * tv_sec).  32-bit hosts prefer __NR_futex_time64 when libc's timespec
 * carries a 64-bit tv_sec, and fall back to the legacy __NR_futex
 * otherwise.  Reaching the end means the build lacks any futex syscall,
 * which is a configuration error, hence the assertion.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7810 
/*
 * Like do_sys_futex(), but through the safe_syscall wrappers so that a
 * guest signal arriving while we are blocked in the host futex call is
 * handled correctly (the call restarts or returns -TARGET_EINTR instead
 * of being lost).
 *
 * Returns 0 on success or a negative TARGET_* errno; -TARGET_ENOSYS if
 * no suitable host futex syscall exists.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7835 
7836 /* ??? Using host futex calls even when target atomic operations
7837    are not really atomic probably breaks things.  However implementing
7838    futexes locally would make futexes shared between multiple processes
7839    tricky.  However they're probably useless because guest atomic
7840    operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex/futex_time64 syscall.
 *
 * Per-operation preparation before handing off to the host:
 *  - values compared against guest memory (VAL for WAIT*, VAL3 for
 *    CMP_REQUEUE*) are byte-swapped into guest byte order, since the
 *    host kernel compares them raw against the guest-endian word;
 *  - ops that take a second futex word get uaddr2 translated to a host
 *    pointer;
 *  - ops that ignore the timeout have it forced to 0 so a stale guest
 *    pointer is never dereferenced;
 *  - REQUEUE/CMP_REQUEUE*/WAKE_OP reinterpret the 4th argument as a
 *    plain integer VAL2, which is smuggled through the timespec pointer
 *    parameter unchanged.
 *
 * @time64 selects 64-bit vs 32-bit guest timespec conversion.
 * Returns 0/positive result or a negative TARGET_* errno.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    /* Only reached for ops that really take a timespec timeout. */
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
7908 
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).
 *
 * The guest supplies a struct file_handle whose handle_bytes field
 * tells us how much opaque handle space follows it; we mirror that
 * layout in a host-side copy, make the host syscall, then copy the
 * result back, swapping only the two defined header fields
 * (handle_bytes, handle_type) — the payload is opaque per the man page.
 *
 * Returns the host syscall result, or -TARGET_EFAULT on any guest
 * memory access failure.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of the guest struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    /* Write back the mount ID even when the syscall itself failed. */
    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
7962 
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2).
 *
 * Duplicate the guest's file_handle (header + handle_bytes of opaque
 * payload) into host memory, fix up the byte order of the two defined
 * header fields, translate the open flags, and invoke the host syscall.
 *
 * Returns the new fd or a negative TARGET_* errno; -TARGET_EFAULT if
 * the guest handle buffer cannot be read.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the first field of the guest struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
7996 
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Emulate signalfd4(2) (and signalfd(2), which passes flags == 0).
 *
 * Rejects any flags other than the target's O_NONBLOCK/O_CLOEXEC with
 * -TARGET_EINVAL, converts the guest sigset to host signal numbers, and
 * on success registers a read-side translator so that struct
 * signalfd_siginfo records are converted back to target layout.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Translate siginfo records read from this fd back to the target. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
8027 
8028 /* Map host to target signal numbers for the wait family of syscalls.
8029    Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int sig;

    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits; remap just that. */
        sig = host_to_target_signal(WTERMSIG(status));
        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal occupies bits 8-15; remap it and reassemble. */
        sig = host_to_target_signal(WSTOPSIG(status));
        return (sig << 8) | (status & 0xff);
    }
    /* Exit/continue encodings carry no signal number to translate. */
    return status;
}
8041 
/*
 * Back /proc/self/cmdline: write each saved argv[] string, including its
 * terminating NUL, to @fd.  Returns 0 on success, -1 on a short or
 * failed write.
 */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        size_t len = strlen(bprm->argv[i]) + 1;

        /* write() returning -1 converts to SIZE_MAX here, so != len catches
           both errors and short writes. */
        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
8058 
/*
 * Emit the per-mapping statistics stanza for a fake /proc/self/smaps
 * entry.  Only Size/KernelPageSize/MMUPageSize reflect the mapping;
 * every accounting counter is reported as 0 since QEMU does not track
 * guest RSS/PSS.  @size is the mapping length in bytes.
 */
static void show_smaps(int fd, unsigned long size)
{
    unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
    unsigned long size_kb = size >> 10;

    dprintf(fd, "Size:                  %lu kB\n"
                "KernelPageSize:        %lu kB\n"
                "MMUPageSize:           %lu kB\n"
                "Rss:                   0 kB\n"
                "Pss:                   0 kB\n"
                "Pss_Dirty:             0 kB\n"
                "Shared_Clean:          0 kB\n"
                "Shared_Dirty:          0 kB\n"
                "Private_Clean:         0 kB\n"
                "Private_Dirty:         0 kB\n"
                "Referenced:            0 kB\n"
                "Anonymous:             0 kB\n"
                "LazyFree:              0 kB\n"
                "AnonHugePages:         0 kB\n"
                "ShmemPmdMapped:        0 kB\n"
                "FilePmdMapped:         0 kB\n"
                "Shared_Hugetlb:        0 kB\n"
                "Private_Hugetlb:       0 kB\n"
                "Swap:                  0 kB\n"
                "SwapPss:               0 kB\n"
                "Locked:                0 kB\n"
                "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
}
8087 
/*
 * Generate the contents of a fake /proc/self/maps (or smaps, when
 * @smaps is true) for the guest: walk the host's own mappings, keep
 * only those that fall inside guest address space, translate them to
 * guest addresses, and print them in the kernel's maps format.  The
 * synthetic vsyscall entry is appended on targets that have one.
 * Always returns 0.
 */
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp mappings that extend past the guest address space. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip ranges whose guest-page flags are not uniform/valid. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

#ifdef TARGET_HPPA
            /* HPPA stacks grow upward, so the limit is the region end. */
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the path starts at column 73, like the kernel. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
8169 
/* Back /proc/self/maps: mapping list without smaps statistics. */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8174 
/* Back /proc/self/smaps: mapping list including per-entry statistics. */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8179 
8180 static int open_self_stat(CPUArchState *cpu_env, int fd)
8181 {
8182     CPUState *cpu = env_cpu(cpu_env);
8183     TaskState *ts = cpu->opaque;
8184     g_autoptr(GString) buf = g_string_new(NULL);
8185     int i;
8186 
8187     for (i = 0; i < 44; i++) {
8188         if (i == 0) {
8189             /* pid */
8190             g_string_printf(buf, FMT_pid " ", getpid());
8191         } else if (i == 1) {
8192             /* app name */
8193             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8194             bin = bin ? bin + 1 : ts->bprm->argv[0];
8195             g_string_printf(buf, "(%.15s) ", bin);
8196         } else if (i == 2) {
8197             /* task state */
8198             g_string_assign(buf, "R "); /* we are running right now */
8199         } else if (i == 3) {
8200             /* ppid */
8201             g_string_printf(buf, FMT_pid " ", getppid());
8202         } else if (i == 21) {
8203             /* starttime */
8204             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8205         } else if (i == 27) {
8206             /* stack bottom */
8207             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8208         } else {
8209             /* for the rest, there is MasterCard */
8210             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8211         }
8212 
8213         if (write(fd, buf->str, buf->len) != buf->len) {
8214             return -1;
8215         }
8216     }
8217 
8218     return 0;
8219 }
8220 
/*
 * Back /proc/self/auxv: copy the auxiliary vector saved on the guest
 * stack at load time out to @fd, then rewind the fd.  Write errors are
 * silently ignored (best effort); always returns 0.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* Read-only lock: len (now remaining count) triggers no copy-back
           of data we modified, since we modified none. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8250 
/*
 * Return 1 if @filename names @entry in this process's own /proc
 * directory -- i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>" --
 * and 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    const char *rest;

    if (strncmp(filename, proc_prefix, sizeof(proc_prefix) - 1) != 0) {
        return 0;
    }
    rest = filename + sizeof(proc_prefix) - 1;

    if (strncmp(rest, "self/", 5) == 0) {
        rest += 5;
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric pid component only matches when it is our own pid. */
        char myself[80];
        size_t n = snprintf(myself, sizeof(myself), "%d/", getpid());
        if (strncmp(rest, myself, n) != 0) {
            return 0;
        }
        rest += n;
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
8274 
/*
 * Write a fatal-exception report (formatted code, executable path, CPU
 * state and memory map) to @logfile.  A NULL @logfile is tolerated and
 * produces no output, which lets callers pass qemu_log_trylock()'s
 * result directly.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                      const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
8287 
/*
 * Report an unhandled guest exception on stderr and, when a separate
 * QEMU log file is configured, duplicate the report there.  @fmt must
 * contain a single conversion consumed by @code.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        /* May return NULL; excp_dump_file handles that case. */
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8301 
8302 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8303     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8304     defined(TARGET_RISCV) || defined(TARGET_S390X)
/* Exact-match predicate for faked absolute /proc paths. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8309 #endif
8310 
8311 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8312 static int open_net_route(CPUArchState *cpu_env, int fd)
8313 {
8314     FILE *fp;
8315     char *line = NULL;
8316     size_t len = 0;
8317     ssize_t read;
8318 
8319     fp = fopen("/proc/net/route", "r");
8320     if (fp == NULL) {
8321         return -1;
8322     }
8323 
8324     /* read header */
8325 
8326     read = getline(&line, &len, fp);
8327     dprintf(fd, "%s", line);
8328 
8329     /* read routes */
8330 
8331     while ((read = getline(&line, &len, fp)) != -1) {
8332         char iface[16];
8333         uint32_t dest, gw, mask;
8334         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8335         int fields;
8336 
8337         fields = sscanf(line,
8338                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8339                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8340                         &mask, &mtu, &window, &irtt);
8341         if (fields != 11) {
8342             continue;
8343         }
8344         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8345                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8346                 metric, tswap32(mask), mtu, window, irtt);
8347     }
8348 
8349     free(line);
8350     fclose(fp);
8351 
8352     return 0;
8353 }
8354 #endif
8355 
#if defined(TARGET_SPARC)
/* Back /proc/cpuinfo for SPARC guests with a minimal fixed entry. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
8363 
#if defined(TARGET_HPPA)
/*
 * Back /proc/cpuinfo for HPPA guests: one fixed PA7300LC stanza per
 * online host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i, num_cpus;

    num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
        dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
        dprintf(fd, "capabilities\t: os32\n");
        dprintf(fd, "model\t\t: 9000/778/B160L - "
                    "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
    }
    return 0;
}
#endif
8381 
#if defined(TARGET_RISCV)
/*
 * Back /proc/cpuinfo for RISC-V guests: one stanza per online host CPU,
 * reporting the emulated CPU's ISA string and MMU scheme.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    if (cfg->mmu) {
        /* RV32 implies Sv32 page tables; otherwise report Sv48. */
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    /* riscv_isa_string() allocates; release it. */
    g_free(isa_string);
    return 0;
}
#endif
8410 
8411 #if defined(TARGET_S390X)
8412 /*
8413  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8414  * show in /proc/cpuinfo.
8415  *
8416  * Skip the following in order to match the missing support in op_ecag():
8417  * - show_cacheinfo().
8418  * - show_cpu_topology().
8419  * - show_cpu_mhz().
8420  *
8421  * Use fixed values for certain fields:
8422  * - bogomips per cpu - from a qemu-system-s390x run.
8423  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8424  *
8425  * Keep the code structure close to arch/s390/kernel/processor.c.
8426  */
8427 
/*
 * Print the s390x "facilities" line: every STFL facility bit the
 * emulated CPU model advertises, as decimal bit numbers.
 */
static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        /* STFL bits are numbered big-endian within the block. */
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}
8443 
/*
 * Build the CPU identification word for logical CPU @n: the CPU number
 * is placed in the physical-address bit field, the rest stays zero
 * (matching what op_ecag-less TCG reports).
 */
static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}
8449 
8450 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8451 {
8452     S390CPUModel *model = env_archcpu(cpu_env)->model;
8453     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8454     uint32_t elf_hwcap = get_elf_hwcap();
8455     const char *hwcap_str;
8456     int i;
8457 
8458     dprintf(fd, "vendor_id       : IBM/S390\n"
8459                 "# processors    : %i\n"
8460                 "bogomips per cpu: 13370.00\n",
8461             num_cpus);
8462     dprintf(fd, "max thread id   : 0\n");
8463     dprintf(fd, "features\t: ");
8464     for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8465         if (!(elf_hwcap & (1 << i))) {
8466             continue;
8467         }
8468         hwcap_str = elf_hwcap_str(i);
8469         if (hwcap_str) {
8470             dprintf(fd, "%s ", hwcap_str);
8471         }
8472     }
8473     dprintf(fd, "\n");
8474     show_facilities(fd);
8475     for (i = 0; i < num_cpus; i++) {
8476         dprintf(fd, "processor %d: "
8477                "version = %02X,  "
8478                "identification = %06X,  "
8479                "machine = %04X\n",
8480                i, model->cpu_ver, cpu_ident(i), model->def->type);
8481     }
8482 }
8483 
/* Print the version/identification/machine triple for logical CPU @n. */
static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}
8492 
/* Print the per-CPU stanza ("cpu number" header plus identification). */
static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}
8498 
/*
 * Back /proc/cpuinfo for s390x guests: machine summary followed by one
 * stanza per online host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
8510 #endif
8511 
#if defined(TARGET_M68K)
/* Back /proc/hardware for m68k guests with a fixed model line. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
8519 
/*
 * openat(2) on behalf of the guest, intercepting the /proc entries that
 * must be faked (maps, smaps, stat, auxv, cmdline, plus per-target
 * cpuinfo/hardware/net-route files).
 *
 * "/proc/self/exe" (or /proc/<pid>/exe) redirects to the real guest
 * executable path.  A faked entry is materialized by generating its
 * contents into a memfd (or an unlinked temp file if memfd_create is
 * unavailable) and returning that fd.  Everything else goes to the host
 * openat, path-translated.  @safe selects the signal-safe syscall
 * wrapper.  Returns an fd or -1 with errno set.
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            /* Only fall back when the host kernel lacks memfd_create. */
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            /* Unlink immediately: fd keeps the file alive, name disappears. */
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated content from offset 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
8601 
8602 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8603 {
8604     ssize_t ret;
8605 
8606     if (!pathname || !buf) {
8607         errno = EFAULT;
8608         return -1;
8609     }
8610 
8611     if (!bufsiz) {
8612         /* Short circuit this for the magic exe check. */
8613         errno = EINVAL;
8614         return -1;
8615     }
8616 
8617     if (is_proc_myself((const char *)pathname, "exe")) {
8618         /*
8619          * Don't worry about sign mismatch as earlier mapping
8620          * logic would have thrown a bad address error.
8621          */
8622         ret = MIN(strlen(exec_path), bufsiz);
8623         /* We cannot NUL terminate the string. */
8624         memcpy(buf, exec_path, ret);
8625     } else {
8626         ret = readlink(path(pathname), buf, bufsiz);
8627     }
8628 
8629     return ret;
8630 }
8631 
/*
 * Emulate execveat(2)/execve(2): marshal the guest's argv and envp
 * pointer arrays into host string arrays, then invoke the host syscall.
 *
 * "/proc/self/exe" as pathname is rewritten to the real guest
 * executable.  On all exits (including the successful-exec-failed case)
 * every locked guest string is released by re-walking the guest arrays.
 * Returns the (negative) TARGET_* errno from the host call, or
 * -TARGET_EFAULT on any guest memory fault.
 */
static int do_execveat(CPUArchState *cpu_env, int dirfd,
                       abi_long pathname, abi_long guest_argp,
                       abi_long guest_envp, int flags)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* First pass: count argv entries up to the NULL terminator. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Same for envp. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Second pass: lock each guest string into host memory. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    if (is_proc_myself(p, "exe")) {
        ret = get_errno(safe_execveat(dirfd, exec_path, argp, envp, flags));
    } else {
        ret = get_errno(safe_execveat(dirfd, p, argp, envp, flags));
    }

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /* Unlock every string we locked, stopping at the first gap. */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8744 
8745 #define TIMER_MAGIC 0x0caf0000
8746 #define TIMER_MAGIC_MASK 0xffff0000
8747 
8748 /* Convert QEMU provided timer ID back to internal 16bit index format */
8749 static target_timer_t get_timer_id(abi_long arg)
8750 {
8751     target_timer_t timerid = arg;
8752 
8753     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8754         return -TARGET_EINVAL;
8755     }
8756 
8757     timerid &= 0xffff;
8758 
8759     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8760         return -TARGET_EINVAL;
8761     }
8762 
8763     return timerid;
8764 }
8765 
8766 static int target_to_host_cpu_mask(unsigned long *host_mask,
8767                                    size_t host_size,
8768                                    abi_ulong target_addr,
8769                                    size_t target_size)
8770 {
8771     unsigned target_bits = sizeof(abi_ulong) * 8;
8772     unsigned host_bits = sizeof(*host_mask) * 8;
8773     abi_ulong *target_mask;
8774     unsigned i, j;
8775 
8776     assert(host_size >= target_size);
8777 
8778     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8779     if (!target_mask) {
8780         return -TARGET_EFAULT;
8781     }
8782     memset(host_mask, 0, host_size);
8783 
8784     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8785         unsigned bit = i * target_bits;
8786         abi_ulong val;
8787 
8788         __get_user(val, &target_mask[i]);
8789         for (j = 0; j < target_bits; j++, bit++) {
8790             if (val & (1UL << j)) {
8791                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8792             }
8793         }
8794     }
8795 
8796     unlock_user(target_mask, target_addr, 0);
8797     return 0;
8798 }
8799 
8800 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8801                                    size_t host_size,
8802                                    abi_ulong target_addr,
8803                                    size_t target_size)
8804 {
8805     unsigned target_bits = sizeof(abi_ulong) * 8;
8806     unsigned host_bits = sizeof(*host_mask) * 8;
8807     abi_ulong *target_mask;
8808     unsigned i, j;
8809 
8810     assert(host_size >= target_size);
8811 
8812     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8813     if (!target_mask) {
8814         return -TARGET_EFAULT;
8815     }
8816 
8817     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8818         unsigned bit = i * target_bits;
8819         abi_ulong val = 0;
8820 
8821         for (j = 0; j < target_bits; j++, bit++) {
8822             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8823                 val |= 1UL << j;
8824             }
8825         }
8826         __put_user(val, &target_mask[i]);
8827     }
8828 
8829     unlock_user(target_mask, target_addr, target_size);
8830     return 0;
8831 }
8832 
#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): read host directory entries into a scratch
 * buffer, then convert each record into the target's dirent layout
 * (field widths, byte order, record alignment) in the guest buffer.
 * Returns the number of bytes written to the guest buffer, or a
 * negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    /* Host-side scratch buffer; g_autofree releases it on any return. */
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    /* Pick whichever host getdents variant the build selected. */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and target records (toff) in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: NUL terminator plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        /* d_off is the seek cookie for the record following this one. */
        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
8919 
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): like do_getdents() but converting host
 * linux_dirent64 records to the target's dirent64 layout.  Returns
 * the number of bytes written to the guest buffer, or a negative
 * target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    /* Host-side scratch buffer; g_autofree releases it on any return. */
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and target records (toff) in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen here includes the NUL terminator. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        /* d_off is the seek cookie for the record following this one. */
        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
8985 
8986 #if defined(TARGET_NR_riscv_hwprobe)
8987 
8988 #define RISCV_HWPROBE_KEY_MVENDORID     0
8989 #define RISCV_HWPROBE_KEY_MARCHID       1
8990 #define RISCV_HWPROBE_KEY_MIMPID        2
8991 
8992 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
8993 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
8994 
8995 #define RISCV_HWPROBE_KEY_IMA_EXT_0     4
8996 #define     RISCV_HWPROBE_IMA_FD       (1 << 0)
8997 #define     RISCV_HWPROBE_IMA_C        (1 << 1)
8998 
8999 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9000 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9001 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9002 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9003 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9004 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9005 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9006 
/* Guest-ABI layout of one riscv_hwprobe key/value pair. */
struct riscv_hwprobe {
    abi_llong  key;
    abi_ullong value;
};
9011 
9012 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9013                                     struct riscv_hwprobe *pair,
9014                                     size_t pair_count)
9015 {
9016     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9017 
9018     for (; pair_count > 0; pair_count--, pair++) {
9019         abi_llong key;
9020         abi_ullong value;
9021         __put_user(0, &pair->value);
9022         __get_user(key, &pair->key);
9023         switch (key) {
9024         case RISCV_HWPROBE_KEY_MVENDORID:
9025             __put_user(cfg->mvendorid, &pair->value);
9026             break;
9027         case RISCV_HWPROBE_KEY_MARCHID:
9028             __put_user(cfg->marchid, &pair->value);
9029             break;
9030         case RISCV_HWPROBE_KEY_MIMPID:
9031             __put_user(cfg->mimpid, &pair->value);
9032             break;
9033         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9034             value = riscv_has_ext(env, RVI) &&
9035                     riscv_has_ext(env, RVM) &&
9036                     riscv_has_ext(env, RVA) ?
9037                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9038             __put_user(value, &pair->value);
9039             break;
9040         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9041             value = riscv_has_ext(env, RVF) &&
9042                     riscv_has_ext(env, RVD) ?
9043                     RISCV_HWPROBE_IMA_FD : 0;
9044             value |= riscv_has_ext(env, RVC) ?
9045                      RISCV_HWPROBE_IMA_C : pair->value;
9046             __put_user(value, &pair->value);
9047             break;
9048         case RISCV_HWPROBE_KEY_CPUPERF_0:
9049             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9050             break;
9051         default:
9052             __put_user(-1, &pair->key);
9053             break;
9054         }
9055     }
9056 }
9057 
9058 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9059 {
9060     int ret, i, tmp;
9061     size_t host_mask_size, target_mask_size;
9062     unsigned long *host_mask;
9063 
9064     /*
9065      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9066      * arg3 contains the cpu count.
9067      */
9068     tmp = (8 * sizeof(abi_ulong));
9069     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9070     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9071                      ~(sizeof(*host_mask) - 1);
9072 
9073     host_mask = alloca(host_mask_size);
9074 
9075     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9076                                   arg4, target_mask_size);
9077     if (ret != 0) {
9078         return ret;
9079     }
9080 
9081     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9082         if (host_mask[i] != 0) {
9083             return 0;
9084         }
9085     }
9086     return -TARGET_EINVAL;
9087 }
9088 
/*
 * Emulate the riscv_hwprobe syscall: arg1 = pairs array, arg2 = pair
 * count, arg3/arg4 = cpu-set size and pointer, arg5 = flags.  Validates
 * the arguments, then fills the guest's key/value pairs in place.
 */
static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
                                 abi_long arg2, abi_long arg3,
                                 abi_long arg4, abi_long arg5)
{
    int ret;
    struct riscv_hwprobe *host_pairs;

    /* flags must be 0 */
    if (arg5 != 0) {
        return -TARGET_EINVAL;
    }

    /* check cpu_set */
    if (arg3 != 0) {
        ret = cpu_set_valid(arg3, arg4);
        if (ret != 0) {
            return ret;
        }
    } else if (arg4 != 0) {
        /* A mask pointer without a size is invalid. */
        return -TARGET_EINVAL;
    }

    /* no pairs */
    if (arg2 == 0) {
        return 0;
    }

    /* Lock the whole pairs array so it can be updated in place. */
    host_pairs = lock_user(VERIFY_WRITE, arg1,
                           sizeof(*host_pairs) * (size_t)arg2, 0);
    if (host_pairs == NULL) {
        return -TARGET_EFAULT;
    }
    risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
    unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
    return 0;
}
9125 #endif /* TARGET_NR_riscv_hwprobe */
9126 
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Raw host syscall wrapper generated by the _syscall* helper macros. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
9130 
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
/* Raw host syscall wrapper for open_tree(2). */
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif
9136 
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
/* Raw host syscall wrapper for move_mount(2). */
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
           int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
9142 
9143 /* This is an internal helper for do_syscall so that it is easier
9144  * to have a single return point, so that actions, such as logging
9145  * of syscall results, can be performed.
9146  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9147  */
9148 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9149                             abi_long arg2, abi_long arg3, abi_long arg4,
9150                             abi_long arg5, abi_long arg6, abi_long arg7,
9151                             abi_long arg8)
9152 {
9153     CPUState *cpu = env_cpu(cpu_env);
9154     abi_long ret;
9155 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9156     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9157     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9158     || defined(TARGET_NR_statx)
9159     struct stat st;
9160 #endif
9161 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9162     || defined(TARGET_NR_fstatfs)
9163     struct statfs stfs;
9164 #endif
9165     void *p;
9166 
9167     switch(num) {
9168     case TARGET_NR_exit:
9169         /* In old applications this may be used to implement _exit(2).
9170            However in threaded applications it is used for thread termination,
9171            and _exit_group is used for application termination.
9172            Do thread termination if we have more then one thread.  */
9173 
9174         if (block_signals()) {
9175             return -QEMU_ERESTARTSYS;
9176         }
9177 
9178         pthread_mutex_lock(&clone_lock);
9179 
9180         if (CPU_NEXT(first_cpu)) {
9181             TaskState *ts = cpu->opaque;
9182 
9183             if (ts->child_tidptr) {
9184                 put_user_u32(0, ts->child_tidptr);
9185                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9186                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9187             }
9188 
9189             object_unparent(OBJECT(cpu));
9190             object_unref(OBJECT(cpu));
9191             /*
9192              * At this point the CPU should be unrealized and removed
9193              * from cpu lists. We can clean-up the rest of the thread
9194              * data without the lock held.
9195              */
9196 
9197             pthread_mutex_unlock(&clone_lock);
9198 
9199             thread_cpu = NULL;
9200             g_free(ts);
9201             rcu_unregister_thread();
9202             pthread_exit(NULL);
9203         }
9204 
9205         pthread_mutex_unlock(&clone_lock);
9206         preexit_cleanup(cpu_env, arg1);
9207         _exit(arg1);
9208         return 0; /* avoid warning */
9209     case TARGET_NR_read:
9210         if (arg2 == 0 && arg3 == 0) {
9211             return get_errno(safe_read(arg1, 0, 0));
9212         } else {
9213             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9214                 return -TARGET_EFAULT;
9215             ret = get_errno(safe_read(arg1, p, arg3));
9216             if (ret >= 0 &&
9217                 fd_trans_host_to_target_data(arg1)) {
9218                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9219             }
9220             unlock_user(p, arg2, ret);
9221         }
9222         return ret;
9223     case TARGET_NR_write:
9224         if (arg2 == 0 && arg3 == 0) {
9225             return get_errno(safe_write(arg1, 0, 0));
9226         }
9227         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9228             return -TARGET_EFAULT;
9229         if (fd_trans_target_to_host_data(arg1)) {
9230             void *copy = g_malloc(arg3);
9231             memcpy(copy, p, arg3);
9232             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9233             if (ret >= 0) {
9234                 ret = get_errno(safe_write(arg1, copy, ret));
9235             }
9236             g_free(copy);
9237         } else {
9238             ret = get_errno(safe_write(arg1, p, arg3));
9239         }
9240         unlock_user(p, arg2, 0);
9241         return ret;
9242 
9243 #ifdef TARGET_NR_open
9244     case TARGET_NR_open:
9245         if (!(p = lock_user_string(arg1)))
9246             return -TARGET_EFAULT;
9247         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9248                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9249                                   arg3, true));
9250         fd_trans_unregister(ret);
9251         unlock_user(p, arg1, 0);
9252         return ret;
9253 #endif
9254     case TARGET_NR_openat:
9255         if (!(p = lock_user_string(arg2)))
9256             return -TARGET_EFAULT;
9257         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9258                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9259                                   arg4, true));
9260         fd_trans_unregister(ret);
9261         unlock_user(p, arg2, 0);
9262         return ret;
9263 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9264     case TARGET_NR_name_to_handle_at:
9265         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9266         return ret;
9267 #endif
9268 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9269     case TARGET_NR_open_by_handle_at:
9270         ret = do_open_by_handle_at(arg1, arg2, arg3);
9271         fd_trans_unregister(ret);
9272         return ret;
9273 #endif
9274 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9275     case TARGET_NR_pidfd_open:
9276         return get_errno(pidfd_open(arg1, arg2));
9277 #endif
9278 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9279     case TARGET_NR_pidfd_send_signal:
9280         {
9281             siginfo_t uinfo, *puinfo;
9282 
9283             if (arg3) {
9284                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9285                 if (!p) {
9286                     return -TARGET_EFAULT;
9287                  }
9288                  target_to_host_siginfo(&uinfo, p);
9289                  unlock_user(p, arg3, 0);
9290                  puinfo = &uinfo;
9291             } else {
9292                  puinfo = NULL;
9293             }
9294             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9295                                               puinfo, arg4));
9296         }
9297         return ret;
9298 #endif
9299 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9300     case TARGET_NR_pidfd_getfd:
9301         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9302 #endif
9303     case TARGET_NR_close:
9304         fd_trans_unregister(arg1);
9305         return get_errno(close(arg1));
9306 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9307     case TARGET_NR_close_range:
9308         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9309         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9310             abi_long fd, maxfd;
9311             maxfd = MIN(arg2, target_fd_max);
9312             for (fd = arg1; fd < maxfd; fd++) {
9313                 fd_trans_unregister(fd);
9314             }
9315         }
9316         return ret;
9317 #endif
9318 
9319     case TARGET_NR_brk:
9320         return do_brk(arg1);
9321 #ifdef TARGET_NR_fork
9322     case TARGET_NR_fork:
9323         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9324 #endif
9325 #ifdef TARGET_NR_waitpid
9326     case TARGET_NR_waitpid:
9327         {
9328             int status;
9329             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9330             if (!is_error(ret) && arg2 && ret
9331                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9332                 return -TARGET_EFAULT;
9333         }
9334         return ret;
9335 #endif
9336 #ifdef TARGET_NR_waitid
9337     case TARGET_NR_waitid:
9338         {
9339             siginfo_t info;
9340             info.si_pid = 0;
9341             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9342             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9343                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9344                     return -TARGET_EFAULT;
9345                 host_to_target_siginfo(p, &info);
9346                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9347             }
9348         }
9349         return ret;
9350 #endif
9351 #ifdef TARGET_NR_creat /* not on alpha */
9352     case TARGET_NR_creat:
9353         if (!(p = lock_user_string(arg1)))
9354             return -TARGET_EFAULT;
9355         ret = get_errno(creat(p, arg2));
9356         fd_trans_unregister(ret);
9357         unlock_user(p, arg1, 0);
9358         return ret;
9359 #endif
9360 #ifdef TARGET_NR_link
9361     case TARGET_NR_link:
9362         {
9363             void * p2;
9364             p = lock_user_string(arg1);
9365             p2 = lock_user_string(arg2);
9366             if (!p || !p2)
9367                 ret = -TARGET_EFAULT;
9368             else
9369                 ret = get_errno(link(p, p2));
9370             unlock_user(p2, arg2, 0);
9371             unlock_user(p, arg1, 0);
9372         }
9373         return ret;
9374 #endif
9375 #if defined(TARGET_NR_linkat)
9376     case TARGET_NR_linkat:
9377         {
9378             void * p2 = NULL;
9379             if (!arg2 || !arg4)
9380                 return -TARGET_EFAULT;
9381             p  = lock_user_string(arg2);
9382             p2 = lock_user_string(arg4);
9383             if (!p || !p2)
9384                 ret = -TARGET_EFAULT;
9385             else
9386                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9387             unlock_user(p, arg2, 0);
9388             unlock_user(p2, arg4, 0);
9389         }
9390         return ret;
9391 #endif
9392 #ifdef TARGET_NR_unlink
9393     case TARGET_NR_unlink:
9394         if (!(p = lock_user_string(arg1)))
9395             return -TARGET_EFAULT;
9396         ret = get_errno(unlink(p));
9397         unlock_user(p, arg1, 0);
9398         return ret;
9399 #endif
9400 #if defined(TARGET_NR_unlinkat)
9401     case TARGET_NR_unlinkat:
9402         if (!(p = lock_user_string(arg2)))
9403             return -TARGET_EFAULT;
9404         ret = get_errno(unlinkat(arg1, p, arg3));
9405         unlock_user(p, arg2, 0);
9406         return ret;
9407 #endif
9408     case TARGET_NR_execveat:
9409         return do_execveat(cpu_env, arg1, arg2, arg3, arg4, arg5);
9410     case TARGET_NR_execve:
9411         return do_execveat(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0);
9412     case TARGET_NR_chdir:
9413         if (!(p = lock_user_string(arg1)))
9414             return -TARGET_EFAULT;
9415         ret = get_errno(chdir(p));
9416         unlock_user(p, arg1, 0);
9417         return ret;
9418 #ifdef TARGET_NR_time
9419     case TARGET_NR_time:
9420         {
9421             time_t host_time;
9422             ret = get_errno(time(&host_time));
9423             if (!is_error(ret)
9424                 && arg1
9425                 && put_user_sal(host_time, arg1))
9426                 return -TARGET_EFAULT;
9427         }
9428         return ret;
9429 #endif
9430 #ifdef TARGET_NR_mknod
9431     case TARGET_NR_mknod:
9432         if (!(p = lock_user_string(arg1)))
9433             return -TARGET_EFAULT;
9434         ret = get_errno(mknod(p, arg2, arg3));
9435         unlock_user(p, arg1, 0);
9436         return ret;
9437 #endif
9438 #if defined(TARGET_NR_mknodat)
9439     case TARGET_NR_mknodat:
9440         if (!(p = lock_user_string(arg2)))
9441             return -TARGET_EFAULT;
9442         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9443         unlock_user(p, arg2, 0);
9444         return ret;
9445 #endif
9446 #ifdef TARGET_NR_chmod
9447     case TARGET_NR_chmod:
9448         if (!(p = lock_user_string(arg1)))
9449             return -TARGET_EFAULT;
9450         ret = get_errno(chmod(p, arg2));
9451         unlock_user(p, arg1, 0);
9452         return ret;
9453 #endif
9454 #ifdef TARGET_NR_lseek
9455     case TARGET_NR_lseek:
9456         return get_errno(lseek(arg1, arg2, arg3));
9457 #endif
9458 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9459     /* Alpha specific */
9460     case TARGET_NR_getxpid:
9461         cpu_env->ir[IR_A4] = getppid();
9462         return get_errno(getpid());
9463 #endif
9464 #ifdef TARGET_NR_getpid
9465     case TARGET_NR_getpid:
9466         return get_errno(getpid());
9467 #endif
9468     case TARGET_NR_mount:
9469         {
9470             /* need to look at the data field */
9471             void *p2, *p3;
9472 
9473             if (arg1) {
9474                 p = lock_user_string(arg1);
9475                 if (!p) {
9476                     return -TARGET_EFAULT;
9477                 }
9478             } else {
9479                 p = NULL;
9480             }
9481 
9482             p2 = lock_user_string(arg2);
9483             if (!p2) {
9484                 if (arg1) {
9485                     unlock_user(p, arg1, 0);
9486                 }
9487                 return -TARGET_EFAULT;
9488             }
9489 
9490             if (arg3) {
9491                 p3 = lock_user_string(arg3);
9492                 if (!p3) {
9493                     if (arg1) {
9494                         unlock_user(p, arg1, 0);
9495                     }
9496                     unlock_user(p2, arg2, 0);
9497                     return -TARGET_EFAULT;
9498                 }
9499             } else {
9500                 p3 = NULL;
9501             }
9502 
9503             /* FIXME - arg5 should be locked, but it isn't clear how to
9504              * do that since it's not guaranteed to be a NULL-terminated
9505              * string.
9506              */
9507             if (!arg5) {
9508                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9509             } else {
9510                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9511             }
9512             ret = get_errno(ret);
9513 
9514             if (arg1) {
9515                 unlock_user(p, arg1, 0);
9516             }
9517             unlock_user(p2, arg2, 0);
9518             if (arg3) {
9519                 unlock_user(p3, arg3, 0);
9520             }
9521         }
9522         return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        /* umount(2) / old umount: arg1 is the guest path of the mount point. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            void *p2, *p4;

            /* Both pathname arguments (from_path, to_path) are required. */
            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                /* Release the first string before bailing out. */
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            /* arg1/arg3 are dirfds, arg5 the flags; passed through unchanged. */
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);

            return ret;
        }
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            /*
             * Only the O_CLOEXEC bit is translated to the host value here;
             * all other flag bits are passed through unchanged.
             */
            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* Implement stime() on top of clock_settime(CLOCK_REALTIME). */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm(2) cannot fail; return the remaining seconds directly. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /*
         * Block QEMU-internal signal handling, then wait with the guest's
         * current mask; pause(2) always returns -EINTR to the guest.
         */
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                /* Byte-swap the guest utimbuf into a host one. */
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                /* NULL times means "set to current time". */
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            if (arg2) {
                /* arg2 points at two consecutive target_timeval structs. */
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            if (arg3) {
                /* arg3 points at two consecutive target_timeval structs. */
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's guest-path remapping (qemu/path.h). */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* faccessat(2) has no flags argument; always pass 0 here. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* faccessat2 forwards the guest's flags (arg4) to the host. */
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) has no return value. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Signal number must be translated from target to host numbering. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            /* Lock both path strings; unlock_user(NULL, ...) is harmless. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            /* arg1/arg3 are the old/new dirfds; arg2/arg4 the paths. */
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            /* Like renameat, but with an extra flags argument (arg5). */
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* mkdir(2): create directory named by guest string arg1, mode arg2. */
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* mkdirat(2): like mkdir, but relative to directory fd arg1. */
        p = lock_user_string(arg2);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        /* rmdir(2): remove the directory named by guest string arg1. */
        p = lock_user_string(arg1);
        if (p == NULL) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translator attached to arg1 to the new fd. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        /* Legacy pipe(): no flags, one-fd-array result convention per arch. */
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                /* Convert each clock_t field and byte-swap for the guest. */
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* times(2) returns elapsed clock ticks; convert that too. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            /* NULL filename disables accounting. */
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Propagate any fd translator attached to arg1 to arg2. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /*
         * dup3(2) accepts only O_CLOEXEC in its flags argument.  Reject
         * anything else with the *target* errno value, as every other
         * error path in this switch does: a bare host -EINVAL would be
         * wrong for any target whose EINVAL value differs from the host's.
         */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Propagate any fd translator attached to arg1 to arg2. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /*
             * Old-style sigaction.  MIPS keeps a multi-word sa_mask (sig[0..3])
             * in its old sigaction layout, so it needs its own conversion;
             * other targets use target_old_sigaction with a single-word mask.
             */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                /* NULL act: query the current disposition only. */
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                /* Write the previous disposition back to the guest. */
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel rejects any sigsetsize other than the native one. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* act (if locked) must be released on every path. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Return the current blocked-signal mask as an old-style word. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Replace the blocked-signal mask; return the previous one. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /*
             * Old-style sigprocmask.  Alpha passes the mask by value in arg2
             * and returns the old mask in the syscall result; other targets
             * pass pointers to old-style sigsets in arg2/arg3.
             */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new set: 'how' is ignored, only query the old mask. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The kernel rejects any sigsetsize other than the native one. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* Translate the target 'how' value to the host constant. */
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new set: 'how' is ignored, only query the old mask. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Write the previous mask back to the guest if requested. */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: report pending signals in old format. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value in arg1, not by pointer. */
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            /* Restore/clean up the temporary suspend mask state. */
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            /* Here the guest supplies the sigsetsize explicitly in arg2. */
            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* The kernel rejects any sigsetsize other than the native one. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                /* Optional timeout, in the 32-bit timespec layout. */
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    /* Copy the delivered siginfo back to the guest. */
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* The result is a signal number; translate it for the guest. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait but with a 64-bit timespec (arg3). */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* Queue a signal with attached siginfo to process arg1. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Same, but targets thread arg2 within thread group arg1. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block host signal delivery while unwinding the signal frame. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        /* Block host signal delivery while unwinding the signal frame. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success without applying them. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                /* Convert the host limits back into the guest layout. */
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* host_to_target_rusage copies the result out to arg2. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Both output pointers are optional. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* Both input pointers are optional. */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select: a single struct pointer holding all arguments. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* false: 32-bit timespec variant. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* true: 64-bit timespec variant. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* Lock both guest path strings; either failing is EFAULT.
             * unlock_user() is called even on failure, so it must cope
             * with a NULL host pointer.
             */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            /* arg2 is the dirfd; arg1/arg3 are target and linkpath. */
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            /* NOTE(review): unlike the symlink/symlinkat cases above there
             * is no explicit !p || !p2 check here; presumably
             * do_guest_readlink() tolerates NULL arguments -- confirm.
             */
            ret = get_errno(do_guest_readlink(p, p2, arg3));
            /* Pass ret as the unlock length so only the bytes actually
             * produced are copied back to guest memory.
             */
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* Reading /proc/self/exe must report the emulated binary,
                 * not the qemu host binary, so answer from exec_path.
                 */
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only RESTART2 carries a command string in arg4. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* Legacy old_mmap ABI: arg1 points at a block of six packed
             * abi_ulong arguments in guest memory; tswapal() fixes the
             * guest byte order.
             */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 passes the file offset in units of 1 << MMAP_SHIFT bytes. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* mmap-family guest addresses are untagged before use. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Drop the flag and widen the range downwards to cover the
                 * whole guest stack region below arg1.
                 */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* g2h() converts the guest address to the host pointer directly. */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* Flag bits need translating between guest and host encodings. */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        /* The guest ABI passes no flags argument; host flags forced to 0. */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        /* Alpha reports success out-of-band via the IR_V0 register. */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies qemu's guest-prefix path remapping (qemu/path.h). */
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Conversion code shared with TARGET_NR_fstatfs via goto. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* statfs64(path, sz, buf): the output buffer is arg3, not arg2. */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
/* Socket syscalls: thin forwarders to the do_* helpers, which handle all
 * guest<->host sockaddr/msghdr conversion.
 */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point used by older 32-bit ABIs. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept == accept4 with flags 0 */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv == recvfrom with a NULL source address */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Last flag of do_sendrecvmsg: 0 = receive, 1 = send. */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send == sendto with a NULL destination address */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* Lock the guest buffer for writing, fill it, and copy back only
         * the number of bytes actually produced (ret).
         */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /* NOTE(review): len is initialised from arg2, but for
             * syslog(type, bufp, len) the buffer pointer is arg2 and the
             * length is arg3 -- and arg3 is what is passed to lock_user()
             * and sys_syslog() below.  The len checks therefore appear to
             * validate the wrong argument; confirm against the kernel's
             * sys_syslog() before relying on them.
             */
            int len = arg2;

            switch (arg1) {
            /* These actions take no buffer; pass NULL straight through. */
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 (new value) is optional; NULL queries/keeps the timer. */
            if (arg2) {
                pvalue = &value;
                /* An itimerval is two consecutive timevals in guest memory:
                 * it_interval followed by it_value.
                 */
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Old value (arg3) is written back only if requested. */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies qemu's guest-prefix path remapping (qemu/path.h). */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        /* Share the struct-stat conversion below with fstat. */
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
            /* Entered via goto from stat/lstat above with ret and st set. */
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding and unset fields are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps only if both host and target have
                 * fields for them.
                 */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter do_syscall() with the number taken
         * from arg1 and the remaining arguments shifted down by one.
         */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only collect host rusage if the guest asked for it. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* status is only meaningful when a child was reaped
                 * (ret != 0); convert its encoding for the guest.
                 */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            /* Copy field by field to the guest structure at arg1. */
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
/* SysV IPC: forwarders to helpers that translate the guest structures. */
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point used by older ABIs. */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop == semtimedop with no timeout */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* Trailing flag selects the 64-bit time_t timespec layout. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5))
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run qemu-side teardown hooks before the process-wide exit. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    /* vm86 mode exists only on 32-bit x86 guests. */
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is both input and output: convert in, call,
             * convert the (possibly updated) structure back out.
             */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* NOTE(review): phtx always points at htx, so the "&& phtx"
             * below can never be false; it is redundant.
             */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Same as clock_adjtime, but with the 64-bit time_t layout. */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host has no llseek: emulate with lseek on the combined
             * 64-bit offset (arg2 = high word, arg3 = low word). */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success write the resulting offset to guest address arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        /* do_ppoll trailing flags select (ppoll semantics?, 64-bit time?) */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* Map the guest iovec array; final 0 = don't copy data in. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                /* final 1 = copy the data read back out to the guest. */
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            /* Map the guest iovec array and copy in the data to write. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            /* Map the guest iovec array; final 0 = don't copy data in. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Split the guest 64-bit offset into host low/high words. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            /* Map the guest iovec array and copy in the data to write. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest-supplied size up to whole host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                /* Copy the mask out in guest abi_ulong units. */
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest-supplied size up to whole host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Either output pointer may be NULL in the guest. */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            /* The kernel rejects a NULL param pointer with EINVAL. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                /* Copy the priority out, byte-swapped for the guest. */
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Never ask the kernel for more than our buffer can hold. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap each field into the guest structure. */
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* Only copy the util-clamp fields if the kernel provided
                 * a struct large enough to contain them. */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* The first field of sched_attr is its own size. */
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                /* size 0 means the original layout without util-clamp. */
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                /* Too small: report the size we expect, as the kernel does. */
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* Any guest bytes beyond the structure we know must be zero. */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            /* Byte-swap each guest field into the host structure. */
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Copy the interval out in the guest timespec layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same, but using the 64-bit time_t timespec layout. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* A faulting request pointer must yield EFAULT, not a sleep
             * on an uninitialized timespec. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                /* Interrupted: report the remaining time if requested. */
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
11570     case TARGET_NR_prctl:
11571         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11572         break;
11573 #ifdef TARGET_NR_arch_prctl
11574     case TARGET_NR_arch_prctl:
11575         return do_arch_prctl(cpu_env, arg1, arg2);
11576 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit values in aligned register pairs; if so
         * the offset words arrive one register later. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        /* Write the cwd string directly into the locked guest buffer. */
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* VERIFY_WRITE: header.version is written back below. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                /* Byte-swap the capability sets supplied by the guest. */
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* Copy the capability sets back out to the guest. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            /* abi_long-sized offset variant. */
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write the updated offset back to the guest. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            /* Explicit 64-bit offset variant. */
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Implemented via do_fork with vfork semantics. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            /* Convert host rlim values (e.g. RLIM_INFINITY) for the guest. */
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* The 64-bit length arrives split across registers; the helper
         * reassembles it per-target. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies any configured sysroot prefix translation. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* Both names share one implementation: stat relative to a dirfd. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* ENOSYS means the host kernel lacks statx; fall back to
                 * the fstatat-based emulation below. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: synthesize a statx result from fstatat(). */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* low2high*: widen guest 16-bit ids to host uid_t/gid_t. */
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* high2low*: narrow host ids for targets with 16-bit uid_t. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize 0 queries the group count; no buffer needed. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize 0 clears the supplementary group list. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
11966     case TARGET_NR_fchown:
11967         return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* Translate the guest pathname, then forward to the host syscall
         * with 16->32 bit uid/gid widening where the target uses short ids. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Widen possibly-16-bit target uids to host uid_t. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the three ids back out, narrowed to the target's
                 * uid width; any faulting pointer yields EFAULT. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    /* Guard fixed: this case was wrapped in #ifdef TARGET_NR_getresgid,
     * which would fail to compile on any target defining getresgid but
     * not setresgid (the case label would be undefined). */
    case TARGET_NR_setresgid:
        /* Widen possibly-16-bit target gids to host gid_t. */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Copy out, narrowed to the target's gid width. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        /* Translate the guest pathname, widen the 16-bit target ids. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        /* sys_* wrappers bypass libc so the id change hits this thread only. */
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; no id-width translation is applied
         * here -- NOTE(review): other 16-bit-id cases use low2highuid. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit-id variant: ids are passed through unmodified. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            /* Alpha getxuid returns two values: euid in register a4,
             * ruid as the normal return value below. */
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: getxgid returns two values, the effective gid in
     * register a4 and the real gid as the normal return value. */
    case TARGET_NR_getxgid:
        {
            gid_t egid;  /* was declared uid_t; group ids should use gid_t */
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Only GSI_IEEE_FP_CONTROL is emulated; anything else reports
         * EOPNOTSUPP, matching unhandled subcodes. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /* Refresh the software control word's status bits from
                 * the live FPCR (the status lives only in FPCR, see the
                 * osf_setsysinfo comment). */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Emulates the two FP-related subcodes; others yield EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Rebuild FPCR: keep only the dynamic rounding mode,
                 * then merge in the trap-enable bits from the new SWCR. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    /* At least one newly-raised exception is unmasked:
                     * deliver SIGFPE.  The checks run lowest-priority
                     * first, so the last matching bit wins the si_code. */
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Map the OSF/target "how" constants onto the host's. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* Unlike rt_sigprocmask, the mask is passed by value (arg2)
             * and the previous mask is the syscall's return value. */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
12221 
#ifdef TARGET_NR_getgid32
    /* 32-bit-id syscall variants: ids pass straight through with no
     * 16<->32 bit translation. */
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            /* gidsetsize == 0 is the "query count" form; grouplist stays
             * NULL and the kernel just returns the group count. */
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Only 'ret' entries were filled in by the kernel. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap each 32-bit gid from target order. */
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                /* Read-only buffer: length 0 means no dirty write-back. */
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit-id variants: no 16<->32 bit id translation needed. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the ids out as full 32-bit values. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        /* 32-bit-id variant: ids pass through unmodified. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* VERIFY_NONE: the probed range need not be accessible, only
             * translated to a host address for the kernel to inspect. */
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is mincore's output vector, yet it is
             * locked with lock_user_string (read semantics) and unlocked
             * with len = ret; looks questionable for negative ret --
             * confirm against upstream before changing. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns a positive errno, not -1/errno. */
        return -host_to_target_errno(ret);
#endif
12397 
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the args into the canonical fd, offset, len, advice
         * order expected by the shared call below; 'ret' is just a
         * scratch temporary here. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise returns a positive errno, not -1/errno. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants from the generic ABI. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
12460 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* Delegated: advice handling depends on guest page mappings. */
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /* 64-bit file locking: translate the guest's flock64 layout,
         * forward to the host fcntl, and copy results back for GETLK. */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* The old ARM OABI uses a differently-packed flock64. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;  /* was tab-indented; QEMU style forbids tabs */

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;  /* was tab-indented; QEMU style forbids tabs */
        default:
            /* Non-locking commands share the plain fcntl path. */
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* On 32-bit ABIs the 64-bit offset arrives as a register pair,
         * possibly with alignment padding before it. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
12537 #ifdef CONFIG_ATTR
12538 #ifdef TARGET_NR_setxattr
12539     case TARGET_NR_listxattr:
12540     case TARGET_NR_llistxattr:
12541     {
12542         void *p, *b = 0;
12543         if (arg2) {
12544             b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
12545             if (!b) {
12546                 return -TARGET_EFAULT;
12547             }
12548         }
12549         p = lock_user_string(arg1);
12550         if (p) {
12551             if (num == TARGET_NR_listxattr) {
12552                 ret = get_errno(listxattr(p, b, arg3));
12553             } else {
12554                 ret = get_errno(llistxattr(p, b, arg3));
12555             }
12556         } else {
12557             ret = -TARGET_EFAULT;
12558         }
12559         unlock_user(p, arg1, 0);
12560         unlock_user(b, arg2, arg3);
12561         return ret;
12562     }
    case TARGET_NR_flistxattr:
    {
        /* fd-based variant of listxattr; arg2 == 0 queries the size. */
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
12576     case TARGET_NR_setxattr:
12577     case TARGET_NR_lsetxattr:
12578         {
12579             void *p, *n, *v = 0;
12580             if (arg3) {
12581                 v = lock_user(VERIFY_READ, arg3, arg4, 1);
12582                 if (!v) {
12583                     return -TARGET_EFAULT;
12584                 }
12585             }
12586             p = lock_user_string(arg1);
12587             n = lock_user_string(arg2);
12588             if (p && n) {
12589                 if (num == TARGET_NR_setxattr) {
12590                     ret = get_errno(setxattr(p, n, v, arg4, arg5));
12591                 } else {
12592                     ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
12593                 }
12594             } else {
12595                 ret = -TARGET_EFAULT;
12596             }
12597             unlock_user(p, arg1, 0);
12598             unlock_user(n, arg2, 0);
12599             unlock_user(v, arg3, 0);
12600         }
12601         return ret;
    case TARGET_NR_fsetxattr:
        {
            /* fd-based setxattr: name (arg2), value (arg3/arg4), flags. */
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
12621     case TARGET_NR_getxattr:
12622     case TARGET_NR_lgetxattr:
12623         {
12624             void *p, *n, *v = 0;
12625             if (arg3) {
12626                 v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
12627                 if (!v) {
12628                     return -TARGET_EFAULT;
12629                 }
12630             }
12631             p = lock_user_string(arg1);
12632             n = lock_user_string(arg2);
12633             if (p && n) {
12634                 if (num == TARGET_NR_getxattr) {
12635                     ret = get_errno(getxattr(p, n, v, arg4));
12636                 } else {
12637                     ret = get_errno(lgetxattr(p, n, v, arg4));
12638                 }
12639             } else {
12640                 ret = -TARGET_EFAULT;
12641             }
12642             unlock_user(p, arg1, 0);
12643             unlock_user(n, arg2, 0);
12644             unlock_user(v, arg3, arg4);
12645         }
12646         return ret;
    case TARGET_NR_fgetxattr:
        {
            /* fd-based getxattr; arg3 == 0 queries the size. */
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
12666     case TARGET_NR_removexattr:
12667     case TARGET_NR_lremovexattr:
12668         {
12669             void *p, *n;
12670             p = lock_user_string(arg1);
12671             n = lock_user_string(arg2);
12672             if (p && n) {
12673                 if (num == TARGET_NR_removexattr) {
12674                     ret = get_errno(removexattr(p, n));
12675                 } else {
12676                     ret = get_errno(lremovexattr(p, n));
12677                 }
12678             } else {
12679                 ret = -TARGET_EFAULT;
12680             }
12681             unlock_user(p, arg1, 0);
12682             unlock_user(n, arg2, 0);
12683         }
12684         return ret;
    case TARGET_NR_fremovexattr:
        {
            /* fd-based removexattr. */
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
12697 #endif
12698 #endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
        /* TLS pointer setup is entirely target-specific. */
#if defined(TARGET_MIPS)
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the value to be 256-byte aligned. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          cpu_env->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not implemented; guests fall back to uname(). */
        return -TARGET_ENOSYS;
#endif
12741 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec, then set the host clock. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* 64-bit time_t layout of the guest timespec. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy-out failure (EFAULT) is propagated to the guest. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /*
             * The kernel accepts a NULL res pointer (success, nothing
             * copied).  Only copy out when the guest supplied a buffer,
             * and report EFAULT on copy failure instead of silently
             * discarding it as the previous code did.
             */
            if (arg2 && host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /*
             * As for clock_getres: NULL res is valid, and a failing
             * copy-out must surface as EFAULT rather than be ignored.
             */
            if (arg2 && host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* 'ts' doubles as the remaining-time output when arg4 is set. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* Same as clock_nanosleep but with 64-bit time_t timespecs. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12851 
12852 #if defined(TARGET_NR_set_tid_address)
12853     case TARGET_NR_set_tid_address:
12854     {
12855         TaskState *ts = cpu->opaque;
12856         ts->child_tidptr = arg1;
12857         /* do not call host set_tid_address() syscall, instead return tid() */
12858         return get_errno(sys_gettid());
12859     }
12860 #endif
12861 
12862     case TARGET_NR_tkill:
12863         return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
12864 
12865     case TARGET_NR_tgkill:
12866         return get_errno(safe_tgkill((int)arg1, (int)arg2,
12867                          target_to_host_signal(arg3)));
12868 
12869 #ifdef TARGET_NR_set_robust_list
12870     case TARGET_NR_set_robust_list:
12871     case TARGET_NR_get_robust_list:
12872         /* The ABI for supporting robust futexes has userspace pass
12873          * the kernel a pointer to a linked list which is updated by
12874          * userspace after the syscall; the list is walked by the kernel
12875          * when the thread exits. Since the linked list in QEMU guest
12876          * memory isn't a valid linked list for the host and we have
12877          * no way to reliably intercept the thread-death event, we can't
12878          * support these. Silently return ENOSYS so that guest userspace
12879          * falls back to a non-robust futex implementation (which should
12880          * be OK except in the corner case of the guest crashing while
12881          * holding a mutex that is shared with another process via
12882          * shared memory).
12883          */
12884         return -TARGET_ENOSYS;
12885 #endif
12886 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* Update file timestamps; a NULL arg3 is forwarded as a NULL
             * times pointer, otherwise both timespecs are converted. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A NULL pathname is also forwarded as-is. */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* 64-bit time_t variant of utimensat. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* All op decoding/conversion happens in do_futex(); the second
         * argument selects 32-bit time_t handling. */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        /* 64-bit time_t futex variant, same helper. */
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* Create an inotify fd and hook up the event translator so
         * reads are converted to the guest layout. */
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* As inotify_init, with O_* style flags translated. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /*
         * Watch a path for events. Fix: fail with EFAULT when the guest
         * pathname cannot be locked instead of passing a NULL pointer
         * on to path()/inotify_add_watch().
         */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Plain pass-through of fd and watch descriptor. */
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
12982 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* POSIX message queue open: translate open flags and the
             * optional struct mq_attr from guest memory. */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the queue-name pointer is offset by one byte
             * ('arg1 - 1'); the reason is not evident from this file --
             * confirm against the target ABI before touching it. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Same one-byte name offset as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
13015 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /*
             * mq_timedsend(mqd, msg, len, prio, abs_timeout): lock the
             * guest message buffer and convert the optional timeout.
             * Fixes: the locked buffer is now NULL-checked and is
             * unlocked on every error path (it previously leaked when
             * the timespec conversion or copy-back faulted).
             */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* 64-bit time_t variant; same locking discipline as above. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
13058 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /*
             * mq_timedreceive(mqd, msg, len, *prio, abs_timeout).
             * Fixes: NULL-check the locked buffer, unlock it on every
             * error path, only write 'prio' back on success (it is
             * uninitialized on failure), and report a fault from the
             * prio copy-out instead of ignoring it.
             * NOTE(review): VERIFY_READ is kept from the original even
             * though the buffer is copied back on unlock -- confirm.
             */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* 64-bit time_t variant; same fixes as above. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13112 
13113     /* Not implemented for now... */
13114 /*     case TARGET_NR_mq_notify: */
13115 /*         break; */
13116 
13117     case TARGET_NR_mq_getsetattr:
13118         {
13119             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13120             ret = 0;
13121             if (arg2 != 0) {
13122                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13123                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13124                                            &posix_mq_attr_out));
13125             } else if (arg3 != 0) {
13126                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13127             }
13128             if (ret == 0 && arg3 != 0) {
13129                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13130             }
13131         }
13132         return ret;
13133 #endif
13134 
#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* Duplicate data between two pipes; no pointers involved. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* Move data between a pipe and an fd; the two optional
             * 64-bit offsets are copied in, used, and copied back. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* Splice the guest iovec array into a pipe. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure through errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Legacy eventfd (no flags); register the fd translator. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* eventfd2: translate only the NONBLOCK/CLOEXEC bits by hand,
         * passing any remaining flag bits through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs the two 64-bit arguments arrive split across
         * register pairs and must be reassembled. */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* MIPS o32 places the 64-bit halves one register slot later. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* Delegated to do_signalfd4(); arg3 (sigsetsize) is not forwarded. */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old flagless signalfd, routed through the same helper. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size hint is passed straight through. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        /* epoll_create1 with translated O_* style flags. */
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* Byte-swap the guest epoll_event (when one is supplied) before
         * handing it to the host. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
13306 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /*
         * Common body for epoll_wait and epoll_pwait: bounce the event
         * array through a host-side buffer, then byte-swap the results
         * into the guest's locked array on the way out.
         */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        /* Dispatch on the actual syscall number that got us here. */
        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; copy only those back. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* NOTE(review): new limits for AS/DATA/STACK are deliberately
         * not forwarded to the host -- presumably because they would
         * constrain QEMU's own address space; confirm before changing. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the hostname directly into the locked guest buffer. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            /*
             * Fix: return immediately with the sentinel value. The old
             * code fell through and compared/returned 'mem_value', which
             * is uninitialized when the load faulted (undefined
             * behavior); the guest takes the queued SIGSEGV anyway.
             */
            return 0xdeadbeef;
        }
        /* Compare-and-swap: store arg1 only if *arg6 == arg2, and
         * return the value that was observed. */
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
13455 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Host timers live in the g_posix_timers[] slot table. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* Release the slot we reserved before failing. */
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                /* The guest-visible timer id is TIMER_MAGIC | slot index. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    /* Roll back the host timer if the id copy-out faults. */
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
13494 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Copy the previous setting back only if the caller asked. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* 64-bit time_t variant of timer_settime. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13548 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): hspec is copied out even when
             * timer_gettime() failed above -- confirm intent. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Return the slot to the free pool. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
13627 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Create a timerfd with translated flags and register the fd
         * translator for guest-layout reads. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* NOTE(review): its_curr is copied out even when
             * timerfd_gettime() failed above -- confirm intent. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit time_t variant of timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            /* Arm/disarm the timerfd; NULL arg3 is forwarded as-is. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            /* Copy the previous setting back only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit time_t variant of timerfd_settime. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13711 
13712 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13713     case TARGET_NR_ioprio_get:
13714         return get_errno(ioprio_get(arg1, arg2));
13715 #endif
13716 
13717 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13718     case TARGET_NR_ioprio_set:
13719         return get_errno(ioprio_set(arg1, arg2, arg3));
13720 #endif
13721 
13722 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13723     case TARGET_NR_setns:
13724         return get_errno(setns(arg1, arg2));
13725 #endif
13726 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13727     case TARGET_NR_unshare:
13728         return get_errno(unshare(arg1));
13729 #endif
13730 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13731     case TARGET_NR_kcmp:
13732         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13733 #endif
13734 #ifdef TARGET_NR_swapcontext
13735     case TARGET_NR_swapcontext:
13736         /* PowerPC specific.  */
13737         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13738 #endif
13739 #ifdef TARGET_NR_memfd_create
13740     case TARGET_NR_memfd_create:
13741         p = lock_user_string(arg1);
13742         if (!p) {
13743             return -TARGET_EFAULT;
13744         }
13745         ret = get_errno(memfd_create(p, arg2));
13746         fd_trans_unregister(ret);
13747         unlock_user(p, arg1, 0);
13748         return ret;
13749 #endif
13750 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13751     case TARGET_NR_membarrier:
13752         return get_errno(membarrier(arg1, arg2));
13753 #endif
13754 
13755 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13756     case TARGET_NR_copy_file_range:
13757         {
13758             loff_t inoff, outoff;
13759             loff_t *pinoff = NULL, *poutoff = NULL;
13760 
13761             if (arg2) {
13762                 if (get_user_u64(inoff, arg2)) {
13763                     return -TARGET_EFAULT;
13764                 }
13765                 pinoff = &inoff;
13766             }
13767             if (arg4) {
13768                 if (get_user_u64(outoff, arg4)) {
13769                     return -TARGET_EFAULT;
13770                 }
13771                 poutoff = &outoff;
13772             }
13773             /* Do not sign-extend the count parameter. */
13774             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13775                                                  (abi_ulong)arg5, arg6));
13776             if (!is_error(ret) && ret > 0) {
13777                 if (arg2) {
13778                     if (put_user_u64(inoff, arg2)) {
13779                         return -TARGET_EFAULT;
13780                     }
13781                 }
13782                 if (arg4) {
13783                     if (put_user_u64(outoff, arg4)) {
13784                         return -TARGET_EFAULT;
13785                     }
13786                 }
13787             }
13788         }
13789         return ret;
13790 #endif
13791 
13792 #if defined(TARGET_NR_pivot_root)
13793     case TARGET_NR_pivot_root:
13794         {
13795             void *p2;
13796             p = lock_user_string(arg1); /* new_root */
13797             p2 = lock_user_string(arg2); /* put_old */
13798             if (!p || !p2) {
13799                 ret = -TARGET_EFAULT;
13800             } else {
13801                 ret = get_errno(pivot_root(p, p2));
13802             }
13803             unlock_user(p2, arg2, 0);
13804             unlock_user(p, arg1, 0);
13805         }
13806         return ret;
13807 #endif
13808 
13809 #if defined(TARGET_NR_riscv_hwprobe)
13810     case TARGET_NR_riscv_hwprobe:
13811         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13812 #endif
13813 
13814     default:
13815         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13816         return -TARGET_ENOSYS;
13817     }
13818     return ret;
13819 }
13820 
13821 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13822                     abi_long arg2, abi_long arg3, abi_long arg4,
13823                     abi_long arg5, abi_long arg6, abi_long arg7,
13824                     abi_long arg8)
13825 {
13826     CPUState *cpu = env_cpu(cpu_env);
13827     abi_long ret;
13828 
13829 #ifdef DEBUG_ERESTARTSYS
13830     /* Debug-only code for exercising the syscall-restart code paths
13831      * in the per-architecture cpu main loops: restart every syscall
13832      * the guest makes once before letting it through.
13833      */
13834     {
13835         static bool flag;
13836         flag = !flag;
13837         if (flag) {
13838             return -QEMU_ERESTARTSYS;
13839         }
13840     }
13841 #endif
13842 
13843     record_syscall_start(cpu, num, arg1,
13844                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13845 
13846     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13847         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13848     }
13849 
13850     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13851                       arg5, arg6, arg7, arg8);
13852 
13853     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13854         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13855                           arg3, arg4, arg5, arg6);
13856     }
13857 
13858     record_syscall_return(cpu, num, ret);
13859     return ret;
13860 }
13861