xref: /openbmc/qemu/linux-user/syscall.c (revision 15ad9853)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
/*
 * sched_attr is not defined in glibc, so declare a mirror of the
 * kernel's layout here for the sys_sched_getattr/sys_sched_setattr
 * raw-syscall wrappers below.  The leading 'size' field is how the
 * kernel versions this structure.
 */
struct sched_attr {
    uint32_t size;            /* size of this structure, filled in by caller */
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;       /* only int32_t field: nice can be negative */
    uint32_t sched_priority;
    uint64_t sched_runtime;   /* the three below are for SCHED_DEADLINE */
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;  /* utilization clamps (newer kernels) */
    uint32_t sched_util_max;
};
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
/*
 * Bidirectional guest<->host translation table for open()/fcntl() file
 * flags.  Each row is { target_mask, target_bits, host_mask, host_bits }:
 * when (value & mask) == bits on one side, the corresponding bits are
 * set on the other.  Rows for flags the host libc may not define are
 * guarded by #if.  The table is terminated by an all-zero row.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
/* Fallback when the host has no utimensat syscall: there is nothing
 * we can forward to, so fail every call with ENOSYS.  Parameters are
 * accepted (and ignored) to keep the call sites identical. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
/*
 * Fallback for hosts without the renameat2 syscall: a plain rename
 * (flags == 0) is forwarded to renameat(); any flag request cannot be
 * honoured here and is reported as ENOSYS.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
/*
 * Claim the first unused slot in g_posix_timers.
 *
 * The slot is reserved with an atomic exchange so two guest threads
 * creating timers concurrently cannot be handed the same slot.
 * Returns the claimed index, or -1 if all GUEST_TIMER_MAX slots are
 * in use.  Release with free_host_timer_slot().
 */
static inline int next_free_host_timer(void)
{
    int k;
    for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
        if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
            return k;
        }
    }
    return -1;
}
538 
/* Release a timer slot claimed by next_free_host_timer().  The release
 * store makes prior writes to g_posix_timers[id] visible before the
 * slot can be re-claimed by another thread. */
static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
543 #endif
544 
/*
 * Translate a host errno value into the guest's numbering.
 * errnos.c.inc expands E(X) to "case X: return TARGET_X;" for every
 * errno QEMU knows how to remap; values not listed there are assumed
 * to be numerically identical and pass through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
555 
/*
 * Inverse of host_to_target_errno(): map a guest errno value back to
 * the host's numbering, using the same generated E(X) case list.
 * Unlisted values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764  *  use the flock64 struct rather than unsuffixed flock
765  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766  */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772 
773 static inline int host_to_target_sock_type(int host_type)
774 {
775     int target_type;
776 
777     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778     case SOCK_DGRAM:
779         target_type = TARGET_SOCK_DGRAM;
780         break;
781     case SOCK_STREAM:
782         target_type = TARGET_SOCK_STREAM;
783         break;
784     default:
785         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786         break;
787     }
788 
789 #if defined(SOCK_CLOEXEC)
790     if (host_type & SOCK_CLOEXEC) {
791         target_type |= TARGET_SOCK_CLOEXEC;
792     }
793 #endif
794 
795 #if defined(SOCK_NONBLOCK)
796     if (host_type & SOCK_NONBLOCK) {
797         target_type |= TARGET_SOCK_NONBLOCK;
798     }
799 #endif
800 
801     return target_type;
802 }
803 
804 static abi_ulong target_brk;
805 static abi_ulong brk_page;
806 
/* Record the initial program break chosen by the image loader.
 * target_brk tracks the guest-visible break (target-page aligned
 * here); brk_page is the host-page-aligned top of the host mapping
 * backing the heap, maintained by do_brk(). */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
812 
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    /*
     * Guest-page-align the requested break for comparison with target_brk,
     * and host-page-align it because host mappings can only grow/shrink
     * in host-page units (which may be larger than guest pages).
     */
    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        /* Growth fits inside the already-mapped host page: nothing to map. */
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contains garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(brk_page), 0, HOST_PAGE_ALIGN(brk_page) - brk_page);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
895 
896 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
897     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
898 static inline abi_long copy_from_user_fdset(fd_set *fds,
899                                             abi_ulong target_fds_addr,
900                                             int n)
901 {
902     int i, nw, j, k;
903     abi_ulong b, *target_fds;
904 
905     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
906     if (!(target_fds = lock_user(VERIFY_READ,
907                                  target_fds_addr,
908                                  sizeof(abi_ulong) * nw,
909                                  1)))
910         return -TARGET_EFAULT;
911 
912     FD_ZERO(fds);
913     k = 0;
914     for (i = 0; i < nw; i++) {
915         /* grab the abi_ulong */
916         __get_user(b, &target_fds[i]);
917         for (j = 0; j < TARGET_ABI_BITS; j++) {
918             /* check the bit inside the abi_ulong */
919             if ((b >> j) & 1)
920                 FD_SET(k, fds);
921             k++;
922         }
923     }
924 
925     unlock_user(target_fds, target_fds_addr, 0);
926 
927     return 0;
928 }
929 
930 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
931                                                  abi_ulong target_fds_addr,
932                                                  int n)
933 {
934     if (target_fds_addr) {
935         if (copy_from_user_fdset(fds, target_fds_addr, n))
936             return -TARGET_EFAULT;
937         *fds_ptr = fds;
938     } else {
939         *fds_ptr = NULL;
940     }
941     return 0;
942 }
943 
944 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
945                                           const fd_set *fds,
946                                           int n)
947 {
948     int i, nw, j, k;
949     abi_long v;
950     abi_ulong *target_fds;
951 
952     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
953     if (!(target_fds = lock_user(VERIFY_WRITE,
954                                  target_fds_addr,
955                                  sizeof(abi_ulong) * nw,
956                                  0)))
957         return -TARGET_EFAULT;
958 
959     k = 0;
960     for (i = 0; i < nw; i++) {
961         v = 0;
962         for (j = 0; j < TARGET_ABI_BITS; j++) {
963             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
964             k++;
965         }
966         __put_user(v, &target_fds[i]);
967     }
968 
969     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
970 
971     return 0;
972 }
973 #endif
974 
/* Host clock tick rate (USER_HZ): Alpha historically uses 1024, all other
 * Linux hosts use 100.  Used to rescale clock_t values for the guest. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/*
 * Rescale a host clock_t tick count to the guest's USER_HZ.  When the two
 * rates match this is the identity; otherwise the multiplication is done
 * in 64 bits to avoid overflow before the division.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
989 
/*
 * Copy a host struct rusage out to guest memory at target_addr, swapping
 * each field to target byte order.  The tv_sec/tv_usec members are stored
 * via tswapal since the target struct uses abi_long-sized fields.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1019 
#ifdef TARGET_NR_setrlimit
/*
 * Convert a guest rlimit value to the host rlim_t.  The guest infinity
 * sentinel maps to RLIM_INFINITY, as does any value that cannot be
 * represented in the host type without truncation.
 */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim;

    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }
    host_rlim = swapped;
    /* value must survive the round-trip through rlim_t */
    if (swapped != (rlim_t)host_rlim) {
        return RLIM_INFINITY;
    }
    return host_rlim;
}
#endif
1037 
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to the guest representation (in target byte
 * order).  Host infinity, and any value too large for the guest's
 * abi_long, become the guest's infinity sentinel.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong swapped;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        swapped = TARGET_RLIM_INFINITY;
    } else {
        swapped = rlim;
    }
    return tswapal(swapped);
}
#endif
1053 
1054 static inline int target_to_host_resource(int code)
1055 {
1056     switch (code) {
1057     case TARGET_RLIMIT_AS:
1058         return RLIMIT_AS;
1059     case TARGET_RLIMIT_CORE:
1060         return RLIMIT_CORE;
1061     case TARGET_RLIMIT_CPU:
1062         return RLIMIT_CPU;
1063     case TARGET_RLIMIT_DATA:
1064         return RLIMIT_DATA;
1065     case TARGET_RLIMIT_FSIZE:
1066         return RLIMIT_FSIZE;
1067     case TARGET_RLIMIT_LOCKS:
1068         return RLIMIT_LOCKS;
1069     case TARGET_RLIMIT_MEMLOCK:
1070         return RLIMIT_MEMLOCK;
1071     case TARGET_RLIMIT_MSGQUEUE:
1072         return RLIMIT_MSGQUEUE;
1073     case TARGET_RLIMIT_NICE:
1074         return RLIMIT_NICE;
1075     case TARGET_RLIMIT_NOFILE:
1076         return RLIMIT_NOFILE;
1077     case TARGET_RLIMIT_NPROC:
1078         return RLIMIT_NPROC;
1079     case TARGET_RLIMIT_RSS:
1080         return RLIMIT_RSS;
1081     case TARGET_RLIMIT_RTPRIO:
1082         return RLIMIT_RTPRIO;
1083 #ifdef RLIMIT_RTTIME
1084     case TARGET_RLIMIT_RTTIME:
1085         return RLIMIT_RTTIME;
1086 #endif
1087     case TARGET_RLIMIT_SIGPENDING:
1088         return RLIMIT_SIGPENDING;
1089     case TARGET_RLIMIT_STACK:
1090         return RLIMIT_STACK;
1091     default:
1092         return code;
1093     }
1094 }
1095 
1096 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1097                                               abi_ulong target_tv_addr)
1098 {
1099     struct target_timeval *target_tv;
1100 
1101     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1102         return -TARGET_EFAULT;
1103     }
1104 
1105     __get_user(tv->tv_sec, &target_tv->tv_sec);
1106     __get_user(tv->tv_usec, &target_tv->tv_usec);
1107 
1108     unlock_user_struct(target_tv, target_tv_addr, 0);
1109 
1110     return 0;
1111 }
1112 
1113 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1114                                             const struct timeval *tv)
1115 {
1116     struct target_timeval *target_tv;
1117 
1118     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1119         return -TARGET_EFAULT;
1120     }
1121 
1122     __put_user(tv->tv_sec, &target_tv->tv_sec);
1123     __put_user(tv->tv_usec, &target_tv->tv_usec);
1124 
1125     unlock_user_struct(target_tv, target_tv_addr, 1);
1126 
1127     return 0;
1128 }
1129 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a 64-bit __kernel_sock_timeval from guest memory into a host
 * struct timeval.  Returns 0 on success, -TARGET_EFAULT on a bad address.
 */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *gtv;

    if (!lock_user_struct(VERIFY_READ, gtv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &gtv->tv_sec);
    __get_user(tv->tv_usec, &gtv->tv_usec);
    unlock_user_struct(gtv, target_tv_addr, 0);
    return 0;
}
#endif
1148 
1149 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1150                                               const struct timeval *tv)
1151 {
1152     struct target__kernel_sock_timeval *target_tv;
1153 
1154     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1155         return -TARGET_EFAULT;
1156     }
1157 
1158     __put_user(tv->tv_sec, &target_tv->tv_sec);
1159     __put_user(tv->tv_usec, &target_tv->tv_usec);
1160 
1161     unlock_user_struct(target_tv, target_tv_addr, 1);
1162 
1163     return 0;
1164 }
1165 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a (non-time64) struct timespec from guest memory into host form.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is unreadable.
 *
 * The guard above previously tested TARGET_NR_pselect6 twice; the second
 * test is now TARGET_NR_pselect6_time64, matching the guard on
 * do_pselect6() which calls this helper in its non-time64 path.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1189 
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a 64-bit __kernel_timespec from guest memory into host form.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is unreadable.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    /* (tv_nsec occupies only the low abi_long bits of the 64-bit field;
     * the double cast sign-extends the significant part and discards the
     * upper padding bits a 32-bit guest may have left uninitialized.) */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1217 
1218 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1219                                                struct timespec *host_ts)
1220 {
1221     struct target_timespec *target_ts;
1222 
1223     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1224         return -TARGET_EFAULT;
1225     }
1226     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1227     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1228     unlock_user_struct(target_ts, target_addr, 1);
1229     return 0;
1230 }
1231 
1232 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1233                                                  struct timespec *host_ts)
1234 {
1235     struct target__kernel_timespec *target_ts;
1236 
1237     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1238         return -TARGET_EFAULT;
1239     }
1240     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1241     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1242     unlock_user_struct(target_ts, target_addr, 1);
1243     return 0;
1244 }
1245 
#if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is invalid.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *gtz;

    if (!lock_user_struct(VERIFY_WRITE, gtz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __put_user(tz->tz_minuteswest, &gtz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &gtz->tz_dsttime);
    unlock_user_struct(gtz, target_tz_addr, 1);
    return 0;
}
#endif
1264 
#if defined(TARGET_NR_settimeofday)
/*
 * Read a struct timezone from guest memory into host form.
 * Returns 0 on success, -TARGET_EFAULT if the guest address is invalid.
 */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *gtz;

    if (!lock_user_struct(VERIFY_READ, gtz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &gtz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &gtz->tz_dsttime);
    unlock_user_struct(gtz, target_tz_addr, 0);
    return 0;
}
#endif
1283 
1284 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1285 #include <mqueue.h>
1286 
1287 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1288                                               abi_ulong target_mq_attr_addr)
1289 {
1290     struct target_mq_attr *target_mq_attr;
1291 
1292     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1293                           target_mq_attr_addr, 1))
1294         return -TARGET_EFAULT;
1295 
1296     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1297     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1298     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1299     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1300 
1301     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1302 
1303     return 0;
1304 }
1305 
1306 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1307                                             const struct mq_attr *attr)
1308 {
1309     struct target_mq_attr *target_mq_attr;
1310 
1311     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1312                           target_mq_attr_addr, 0))
1313         return -TARGET_EFAULT;
1314 
1315     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1316     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1317     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1318     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1319 
1320     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1321 
1322     return 0;
1323 }
1324 #endif
1325 
1326 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
1327 /* do_select() must return target values and target errnos. */
/* do_select() must return target values and target errnos. */
/*
 * Emulate select(2) by converting the three guest fd_sets and the
 * timeout, issuing a host pselect6, and copying the surviving sets
 * (and the kernel-updated timeout) back to guest memory.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* NULL guest pointers become NULL fd_set pointers for the host call */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select takes a timeval, but the host call is pselect6: convert
     * the guest timeval to a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    /* Only copy results back on success, as the kernel does. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Linux updates the timeout with the time remaining; mirror
         * that back into the guest's timeval. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1383 
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/*
 * Emulate the old sys_select() entry point, which passes a single guest
 * pointer to a struct packing all five select() arguments.  Unpack the
 * struct and forward to do_select().
 */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *args;
    abi_ulong in_fds, out_fds, ex_fds, tv_addr;
    long n;

    if (!lock_user_struct(VERIFY_READ, args, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    n = tswapal(args->n);
    in_fds = tswapal(args->inp);
    out_fds = tswapal(args->outp);
    ex_fds = tswapal(args->exp);
    tv_addr = tswapal(args->tvp);

    unlock_user_struct(args, arg1, 0);

    return do_select(n, in_fds, out_fds, ex_fds, tv_addr);
}
#endif
1406 #endif
1407 
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6(2)/pselect6_time64(2).  time64 selects which guest
 * timespec layout is used for arg5 on both the inbound and outbound
 * conversion.  arg6, when non-NULL, points at the kernel's two-word
 * { sigset address, sigset size } structure.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* NULL guest pointers become NULL fd_set pointers for the host call */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        /* A NULL sigset address inside the pair means "no mask change" */
        if (arg_sigset) {
            /* Install the temporary signal mask for the duration of the
             * host call; finish_sigsuspend_mask() undoes it below. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    /* Only copy results back on success, as the kernel does. */
    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        /* The kernel updates the timespec with the remaining time */
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
1519 
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Common implementation for poll(2), ppoll(2) and ppoll_time64(2).
 * ppoll selects the ppoll semantics (timespec + sigset in arg3..arg5);
 * otherwise arg3 is poll's millisecond timeout.  time64 selects the
 * guest timespec layout when ppoll is true.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* reject sizes that would overflow the lock_user length below */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        /* build the host pollfd array from the guest one */
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            /* NULL timespec means wait forever */
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Install the temporary signal mask; undone by
             * finish_sigsuspend_mask() after the host call. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        /* The kernel updates the timespec with the remaining time */
        /* NOTE(review): the -TARGET_EFAULT returns below skip the final
         * unlock_user() of target_pfd — confirm whether that is benign
         * here. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
          struct timespec ts, *pts;

          if (arg3 >= 0) {
              /* Convert ms to secs, ns */
              ts.tv_sec = arg3 / 1000;
              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
              pts = &ts;
          } else {
              /* -ve poll() timeout means "infinite" */
              pts = NULL;
          }
          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    /* copy revents back to the guest pollfd array on success */
    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1618 
/*
 * Common implementation for pipe(2) and pipe2(2).  is_pipe2 selects
 * whether the extra flags and the pipe2 result convention apply; some
 * targets return the two descriptors for plain pipe() in registers
 * rather than through the pipedes array.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* second fd returned in a4; first is the syscall result */
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        /* second fd returned in v1 ($3) */
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        /* second fd returned in r1 */
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        /* second fd returned in %o1 */
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* default convention: store both fds into the guest array */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1652 
1653 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1654                                               abi_ulong target_addr,
1655                                               socklen_t len)
1656 {
1657     struct target_ip_mreqn *target_smreqn;
1658 
1659     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1660     if (!target_smreqn)
1661         return -TARGET_EFAULT;
1662     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1663     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1664     if (len == sizeof(struct target_ip_mreqn))
1665         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1666     unlock_user(target_smreqn, target_addr, 0);
1667 
1668     return 0;
1669 }
1670 
/*
 * Copy a guest sockaddr at target_addr into the host buffer addr,
 * converting sa_family and any family-specific multi-byte fields to
 * host byte order.  Per-fd translators (fd_trans) take precedence.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* last counted byte is non-NUL but a NUL follows: grow len
             * by one so the terminator is included */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	/* NOTE(review): unlike the AF_NETLINK/AF_INET6 branches there is
	 * no len check before touching sll_* fields — confirm callers
	 * always pass a full sockaddr_ll here. */
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1732 
1733 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1734                                                struct sockaddr *addr,
1735                                                socklen_t len)
1736 {
1737     struct target_sockaddr *target_saddr;
1738 
1739     if (len == 0) {
1740         return 0;
1741     }
1742     assert(addr);
1743 
1744     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1745     if (!target_saddr)
1746         return -TARGET_EFAULT;
1747     memcpy(target_saddr, addr, len);
1748     if (len >= offsetof(struct target_sockaddr, sa_family) +
1749         sizeof(target_saddr->sa_family)) {
1750         target_saddr->sa_family = tswap16(addr->sa_family);
1751     }
1752     if (addr->sa_family == AF_NETLINK &&
1753         len >= sizeof(struct target_sockaddr_nl)) {
1754         struct target_sockaddr_nl *target_nl =
1755                (struct target_sockaddr_nl *)target_saddr;
1756         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1757         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1758     } else if (addr->sa_family == AF_PACKET) {
1759         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1760         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1761         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1762     } else if (addr->sa_family == AF_INET6 &&
1763                len >= sizeof(struct target_sockaddr_in6)) {
1764         struct target_sockaddr_in6 *target_in6 =
1765                (struct target_sockaddr_in6 *)target_saddr;
1766         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1767     }
1768     unlock_user(target_saddr, target_addr, len);
1769 
1770     return 0;
1771 }
1772 
/*
 * Convert the ancillary data (control messages) of a guest sendmsg()
 * into host format, filling in the host-side control buffer already
 * attached to msgh.
 *
 * Returns 0 on success (including the no-control-data case), or
 * -TARGET_EFAULT if the guest control buffer cannot be locked.
 * On exit msgh->msg_controllen is set to the number of host bytes
 * actually produced.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;          /* host bytes consumed so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* No room for even one target header: treat as no ancillary data. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk host and target control-message chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length = target cmsg_len minus the target header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate the level; SOL_SOCKET may differ numerically
         * between target and host ABIs.
         */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* SCM_RIGHTS payload: an array of file descriptors (ints). */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* SCM_CREDENTIALS payload: pid/uid/gid triple. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload type: log it and pass the raw bytes
             * through unconverted.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1866 
/*
 * Convert ancillary data received from the host (e.g. via recvmsg())
 * into target format, writing it into the guest's control buffer.
 *
 * Returns 0 on success, or -TARGET_EFAULT if the guest control buffer
 * cannot be locked.  Truncation because the guest buffer is too small
 * is not an error: it is reported to the guest via MSG_CTRUNC in
 * msg_flags, mirroring the kernel's put_cmsg() behaviour.  On exit
 * target_msgh->msg_controllen holds the number of target bytes written.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;      /* target bytes still available */
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;          /* target bytes written so far */

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk host and target control-message chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* Translate the level; SOL_SOCKET may differ numerically
         * between host and target ABIs.
         */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough room left for the full payload: flag truncation
         * and clamp tgt_len to what fits after the header.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Array of file descriptors; copy as many whole ints
                 * as fit in the (possibly truncated) target space.
                 */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Only full, untruncated timevals are supported. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err plus the offending
                 * peer address, converted field by field.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 analogue of IP_RECVERR above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw byte copy, zero-padding if the
             * target representation is larger than the host one.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the space consumed in the target buffer and
         * advance both chains.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2098 
/* do_setsockopt() Must return target values and target errnos.
 *
 * Translates a guest setsockopt() into the host equivalent: the
 * option payload at optval_addr is read from guest memory, converted
 * (byte-swapped / re-marshalled) where its layout contains multi-byte
 * or pointer fields, and forwarded to the host setsockopt().
 * Options whose payload is a plain 'int' are read with get_user_u32()
 * and passed through unchanged.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either a 32-bit or a single-byte payload;
             * a zero-length payload leaves val at 0.
             */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Payload is a variable-size ip_mreq/ip_mreqn; the
             * conversion is done by target_to_host_ip_mreq().
             */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* ip_mreq_source contains only in_addr fields (already in
             * network byte order), so the guest bytes pass straight
             * through to the host.
             */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* All of the above take a plain 32-bit int payload. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Interface index is host-endian; the address is already
             * in network byte order.
             */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index needs byte-swapping. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            /* Filter is an array of eight 32-bit words; swap each. */
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key is an opaque byte string; copy it unconverted. */
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* The auth size is carried in optlen itself; no payload. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

/* Shared tail for SO_RCVTIMEO / SO_SNDTIMEO: convert the guest
 * target_timeval and forward with the already-translated optname.
 */
set_timeout:
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Classic-BPF program: rebuild the host sock_fprog,
                 * byte-swapping each instruction (code/k; jt/jf are
                 * single bytes).
                 */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		/* Copy the interface name into a NUL-terminated local
		 * buffer (the guest string need not be terminated).
		 */
		char *dev_ifname, *addr_ifname;

		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	/* Common tail for the 'int'-valued SO_* options above. */
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported netlink options take a 32-bit int payload. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2561 
2562 /* do_getsockopt() Must return target values and target errnos. */
2563 static abi_long do_getsockopt(int sockfd, int level, int optname,
2564                               abi_ulong optval_addr, abi_ulong optlen)
2565 {
2566     abi_long ret;
2567     int len, val;
2568     socklen_t lv;
2569 
2570     switch(level) {
2571     case TARGET_SOL_SOCKET:
2572         level = SOL_SOCKET;
2573         switch (optname) {
2574         /* These don't just return a single integer */
2575         case TARGET_SO_PEERNAME:
2576             goto unimplemented;
2577         case TARGET_SO_RCVTIMEO: {
2578             struct timeval tv;
2579             socklen_t tvlen;
2580 
2581             optname = SO_RCVTIMEO;
2582 
2583 get_timeout:
2584             if (get_user_u32(len, optlen)) {
2585                 return -TARGET_EFAULT;
2586             }
2587             if (len < 0) {
2588                 return -TARGET_EINVAL;
2589             }
2590 
2591             tvlen = sizeof(tv);
2592             ret = get_errno(getsockopt(sockfd, level, optname,
2593                                        &tv, &tvlen));
2594             if (ret < 0) {
2595                 return ret;
2596             }
2597             if (len > sizeof(struct target_timeval)) {
2598                 len = sizeof(struct target_timeval);
2599             }
2600             if (copy_to_user_timeval(optval_addr, &tv)) {
2601                 return -TARGET_EFAULT;
2602             }
2603             if (put_user_u32(len, optlen)) {
2604                 return -TARGET_EFAULT;
2605             }
2606             break;
2607         }
2608         case TARGET_SO_SNDTIMEO:
2609             optname = SO_SNDTIMEO;
2610             goto get_timeout;
2611         case TARGET_SO_PEERCRED: {
2612             struct ucred cr;
2613             socklen_t crlen;
2614             struct target_ucred *tcr;
2615 
2616             if (get_user_u32(len, optlen)) {
2617                 return -TARGET_EFAULT;
2618             }
2619             if (len < 0) {
2620                 return -TARGET_EINVAL;
2621             }
2622 
2623             crlen = sizeof(cr);
2624             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2625                                        &cr, &crlen));
2626             if (ret < 0) {
2627                 return ret;
2628             }
2629             if (len > crlen) {
2630                 len = crlen;
2631             }
2632             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2633                 return -TARGET_EFAULT;
2634             }
2635             __put_user(cr.pid, &tcr->pid);
2636             __put_user(cr.uid, &tcr->uid);
2637             __put_user(cr.gid, &tcr->gid);
2638             unlock_user_struct(tcr, optval_addr, 1);
2639             if (put_user_u32(len, optlen)) {
2640                 return -TARGET_EFAULT;
2641             }
2642             break;
2643         }
2644         case TARGET_SO_PEERSEC: {
2645             char *name;
2646 
2647             if (get_user_u32(len, optlen)) {
2648                 return -TARGET_EFAULT;
2649             }
2650             if (len < 0) {
2651                 return -TARGET_EINVAL;
2652             }
2653             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2654             if (!name) {
2655                 return -TARGET_EFAULT;
2656             }
2657             lv = len;
2658             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2659                                        name, &lv));
2660             if (put_user_u32(lv, optlen)) {
2661                 ret = -TARGET_EFAULT;
2662             }
2663             unlock_user(name, optval_addr, lv);
2664             break;
2665         }
2666         case TARGET_SO_LINGER:
2667         {
2668             struct linger lg;
2669             socklen_t lglen;
2670             struct target_linger *tlg;
2671 
2672             if (get_user_u32(len, optlen)) {
2673                 return -TARGET_EFAULT;
2674             }
2675             if (len < 0) {
2676                 return -TARGET_EINVAL;
2677             }
2678 
2679             lglen = sizeof(lg);
2680             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2681                                        &lg, &lglen));
2682             if (ret < 0) {
2683                 return ret;
2684             }
2685             if (len > lglen) {
2686                 len = lglen;
2687             }
2688             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2689                 return -TARGET_EFAULT;
2690             }
2691             __put_user(lg.l_onoff, &tlg->l_onoff);
2692             __put_user(lg.l_linger, &tlg->l_linger);
2693             unlock_user_struct(tlg, optval_addr, 1);
2694             if (put_user_u32(len, optlen)) {
2695                 return -TARGET_EFAULT;
2696             }
2697             break;
2698         }
2699         /* Options with 'int' argument.  */
2700         case TARGET_SO_DEBUG:
2701             optname = SO_DEBUG;
2702             goto int_case;
2703         case TARGET_SO_REUSEADDR:
2704             optname = SO_REUSEADDR;
2705             goto int_case;
2706 #ifdef SO_REUSEPORT
2707         case TARGET_SO_REUSEPORT:
2708             optname = SO_REUSEPORT;
2709             goto int_case;
2710 #endif
2711         case TARGET_SO_TYPE:
2712             optname = SO_TYPE;
2713             goto int_case;
2714         case TARGET_SO_ERROR:
2715             optname = SO_ERROR;
2716             goto int_case;
2717         case TARGET_SO_DONTROUTE:
2718             optname = SO_DONTROUTE;
2719             goto int_case;
2720         case TARGET_SO_BROADCAST:
2721             optname = SO_BROADCAST;
2722             goto int_case;
2723         case TARGET_SO_SNDBUF:
2724             optname = SO_SNDBUF;
2725             goto int_case;
2726         case TARGET_SO_RCVBUF:
2727             optname = SO_RCVBUF;
2728             goto int_case;
2729         case TARGET_SO_KEEPALIVE:
2730             optname = SO_KEEPALIVE;
2731             goto int_case;
2732         case TARGET_SO_OOBINLINE:
2733             optname = SO_OOBINLINE;
2734             goto int_case;
2735         case TARGET_SO_NO_CHECK:
2736             optname = SO_NO_CHECK;
2737             goto int_case;
2738         case TARGET_SO_PRIORITY:
2739             optname = SO_PRIORITY;
2740             goto int_case;
2741 #ifdef SO_BSDCOMPAT
2742         case TARGET_SO_BSDCOMPAT:
2743             optname = SO_BSDCOMPAT;
2744             goto int_case;
2745 #endif
2746         case TARGET_SO_PASSCRED:
2747             optname = SO_PASSCRED;
2748             goto int_case;
2749         case TARGET_SO_TIMESTAMP:
2750             optname = SO_TIMESTAMP;
2751             goto int_case;
2752         case TARGET_SO_RCVLOWAT:
2753             optname = SO_RCVLOWAT;
2754             goto int_case;
2755         case TARGET_SO_ACCEPTCONN:
2756             optname = SO_ACCEPTCONN;
2757             goto int_case;
2758         case TARGET_SO_PROTOCOL:
2759             optname = SO_PROTOCOL;
2760             goto int_case;
2761         case TARGET_SO_DOMAIN:
2762             optname = SO_DOMAIN;
2763             goto int_case;
2764         default:
2765             goto int_case;
2766         }
2767         break;
2768     case SOL_TCP:
2769     case SOL_UDP:
2770         /* TCP and UDP options all take an 'int' value.  */
2771     int_case:
2772         if (get_user_u32(len, optlen))
2773             return -TARGET_EFAULT;
2774         if (len < 0)
2775             return -TARGET_EINVAL;
2776         lv = sizeof(lv);
2777         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2778         if (ret < 0)
2779             return ret;
2780         switch (optname) {
2781         case SO_TYPE:
2782             val = host_to_target_sock_type(val);
2783             break;
2784         case SO_ERROR:
2785             val = host_to_target_errno(val);
2786             break;
2787         }
2788         if (len > lv)
2789             len = lv;
2790         if (len == 4) {
2791             if (put_user_u32(val, optval_addr))
2792                 return -TARGET_EFAULT;
2793         } else {
2794             if (put_user_u8(val, optval_addr))
2795                 return -TARGET_EFAULT;
2796         }
2797         if (put_user_u32(len, optlen))
2798             return -TARGET_EFAULT;
2799         break;
2800     case SOL_IP:
2801         switch(optname) {
2802         case IP_TOS:
2803         case IP_TTL:
2804         case IP_HDRINCL:
2805         case IP_ROUTER_ALERT:
2806         case IP_RECVOPTS:
2807         case IP_RETOPTS:
2808         case IP_PKTINFO:
2809         case IP_MTU_DISCOVER:
2810         case IP_RECVERR:
2811         case IP_RECVTOS:
2812 #ifdef IP_FREEBIND
2813         case IP_FREEBIND:
2814 #endif
2815         case IP_MULTICAST_TTL:
2816         case IP_MULTICAST_LOOP:
2817             if (get_user_u32(len, optlen))
2818                 return -TARGET_EFAULT;
2819             if (len < 0)
2820                 return -TARGET_EINVAL;
2821             lv = sizeof(lv);
2822             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2823             if (ret < 0)
2824                 return ret;
2825             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2826                 len = 1;
2827                 if (put_user_u32(len, optlen)
2828                     || put_user_u8(val, optval_addr))
2829                     return -TARGET_EFAULT;
2830             } else {
2831                 if (len > sizeof(int))
2832                     len = sizeof(int);
2833                 if (put_user_u32(len, optlen)
2834                     || put_user_u32(val, optval_addr))
2835                     return -TARGET_EFAULT;
2836             }
2837             break;
2838         default:
2839             ret = -TARGET_ENOPROTOOPT;
2840             break;
2841         }
2842         break;
2843     case SOL_IPV6:
2844         switch (optname) {
2845         case IPV6_MTU_DISCOVER:
2846         case IPV6_MTU:
2847         case IPV6_V6ONLY:
2848         case IPV6_RECVPKTINFO:
2849         case IPV6_UNICAST_HOPS:
2850         case IPV6_MULTICAST_HOPS:
2851         case IPV6_MULTICAST_LOOP:
2852         case IPV6_RECVERR:
2853         case IPV6_RECVHOPLIMIT:
2854         case IPV6_2292HOPLIMIT:
2855         case IPV6_CHECKSUM:
2856         case IPV6_ADDRFORM:
2857         case IPV6_2292PKTINFO:
2858         case IPV6_RECVTCLASS:
2859         case IPV6_RECVRTHDR:
2860         case IPV6_2292RTHDR:
2861         case IPV6_RECVHOPOPTS:
2862         case IPV6_2292HOPOPTS:
2863         case IPV6_RECVDSTOPTS:
2864         case IPV6_2292DSTOPTS:
2865         case IPV6_TCLASS:
2866         case IPV6_ADDR_PREFERENCES:
2867 #ifdef IPV6_RECVPATHMTU
2868         case IPV6_RECVPATHMTU:
2869 #endif
2870 #ifdef IPV6_TRANSPARENT
2871         case IPV6_TRANSPARENT:
2872 #endif
2873 #ifdef IPV6_FREEBIND
2874         case IPV6_FREEBIND:
2875 #endif
2876 #ifdef IPV6_RECVORIGDSTADDR
2877         case IPV6_RECVORIGDSTADDR:
2878 #endif
2879             if (get_user_u32(len, optlen))
2880                 return -TARGET_EFAULT;
2881             if (len < 0)
2882                 return -TARGET_EINVAL;
2883             lv = sizeof(lv);
2884             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2885             if (ret < 0)
2886                 return ret;
2887             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2888                 len = 1;
2889                 if (put_user_u32(len, optlen)
2890                     || put_user_u8(val, optval_addr))
2891                     return -TARGET_EFAULT;
2892             } else {
2893                 if (len > sizeof(int))
2894                     len = sizeof(int);
2895                 if (put_user_u32(len, optlen)
2896                     || put_user_u32(val, optval_addr))
2897                     return -TARGET_EFAULT;
2898             }
2899             break;
2900         default:
2901             ret = -TARGET_ENOPROTOOPT;
2902             break;
2903         }
2904         break;
2905 #ifdef SOL_NETLINK
2906     case SOL_NETLINK:
2907         switch (optname) {
2908         case NETLINK_PKTINFO:
2909         case NETLINK_BROADCAST_ERROR:
2910         case NETLINK_NO_ENOBUFS:
2911 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2912         case NETLINK_LISTEN_ALL_NSID:
2913         case NETLINK_CAP_ACK:
2914 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2915 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2916         case NETLINK_EXT_ACK:
2917 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2919         case NETLINK_GET_STRICT_CHK:
2920 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2921             if (get_user_u32(len, optlen)) {
2922                 return -TARGET_EFAULT;
2923             }
2924             if (len != sizeof(val)) {
2925                 return -TARGET_EINVAL;
2926             }
2927             lv = len;
2928             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2929             if (ret < 0) {
2930                 return ret;
2931             }
2932             if (put_user_u32(lv, optlen)
2933                 || put_user_u32(val, optval_addr)) {
2934                 return -TARGET_EFAULT;
2935             }
2936             break;
2937 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2938         case NETLINK_LIST_MEMBERSHIPS:
2939         {
2940             uint32_t *results;
2941             int i;
2942             if (get_user_u32(len, optlen)) {
2943                 return -TARGET_EFAULT;
2944             }
2945             if (len < 0) {
2946                 return -TARGET_EINVAL;
2947             }
2948             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2949             if (!results && len > 0) {
2950                 return -TARGET_EFAULT;
2951             }
2952             lv = len;
2953             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2954             if (ret < 0) {
2955                 unlock_user(results, optval_addr, 0);
2956                 return ret;
2957             }
2958             /* swap host endianess to target endianess. */
2959             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2960                 results[i] = tswap32(results[i]);
2961             }
2962             if (put_user_u32(lv, optlen)) {
2963                 return -TARGET_EFAULT;
2964             }
2965             unlock_user(results, optval_addr, 0);
2966             break;
2967         }
2968 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2969         default:
2970             goto unimplemented;
2971         }
2972         break;
2973 #endif /* SOL_NETLINK */
2974     default:
2975     unimplemented:
2976         qemu_log_mask(LOG_UNIMP,
2977                       "getsockopt level=%d optname=%d not yet supported\n",
2978                       level, optname);
2979         ret = -TARGET_EOPNOTSUPP;
2980         break;
2981     }
2982     return ret;
2983 }
2984 
2985 /* Convert target low/high pair representing file offset into the host
2986  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2987  * as the kernel doesn't handle them either.
2988  */
2989 static void target_to_host_low_high(abi_ulong tlow,
2990                                     abi_ulong thigh,
2991                                     unsigned long *hlow,
2992                                     unsigned long *hhigh)
2993 {
2994     uint64_t off = tlow |
2995         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2996         TARGET_LONG_BITS / 2;
2997 
2998     *hlow = off;
2999     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
3000 }
3001 
/*
 * Translate a guest iovec array (count entries at target_addr) into a
 * host struct iovec array with every buffer locked into host memory.
 *
 * On success returns a g_malloc'd vector the caller must release with
 * unlock_iovec().  On failure returns NULL with errno set (0 for the
 * legitimate count==0 case, EINVAL/ENOMEM/EFAULT otherwise); callers
 * distinguish the empty case by checking errno.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Not an error: signal "empty" with a NULL vector and errno 0. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so the running total never exceeds max_len; once the
             * budget is exhausted all later entries get length 0. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked so far (entries with a positive
     * guest length), then the iovec array itself.  Entry i itself was
     * never successfully locked when we arrive here. */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3089 
3090 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3091                          abi_ulong count, int copy)
3092 {
3093     struct target_iovec *target_vec;
3094     int i;
3095 
3096     target_vec = lock_user(VERIFY_READ, target_addr,
3097                            count * sizeof(struct target_iovec), 1);
3098     if (target_vec) {
3099         for (i = 0; i < count; i++) {
3100             abi_ulong base = tswapal(target_vec[i].iov_base);
3101             abi_long len = tswapal(target_vec[i].iov_len);
3102             if (len < 0) {
3103                 break;
3104             }
3105             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3106         }
3107         unlock_user(target_vec, target_addr, 0);
3108     }
3109 
3110     g_free(vec);
3111 }
3112 
3113 static inline int target_to_host_sock_type(int *type)
3114 {
3115     int host_type = 0;
3116     int target_type = *type;
3117 
3118     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3119     case TARGET_SOCK_DGRAM:
3120         host_type = SOCK_DGRAM;
3121         break;
3122     case TARGET_SOCK_STREAM:
3123         host_type = SOCK_STREAM;
3124         break;
3125     default:
3126         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3127         break;
3128     }
3129     if (target_type & TARGET_SOCK_CLOEXEC) {
3130 #if defined(SOCK_CLOEXEC)
3131         host_type |= SOCK_CLOEXEC;
3132 #else
3133         return -TARGET_EINVAL;
3134 #endif
3135     }
3136     if (target_type & TARGET_SOCK_NONBLOCK) {
3137 #if defined(SOCK_NONBLOCK)
3138         host_type |= SOCK_NONBLOCK;
3139 #elif !defined(O_NONBLOCK)
3140         return -TARGET_EINVAL;
3141 #endif
3142     }
3143     *type = host_type;
3144     return 0;
3145 }
3146 
3147 /* Try to emulate socket type flags after socket creation.  */
3148 static int sock_flags_fixup(int fd, int target_type)
3149 {
3150 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3151     if (target_type & TARGET_SOCK_NONBLOCK) {
3152         int flags = fcntl(fd, F_GETFL);
3153         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3154             close(fd);
3155             return -TARGET_EINVAL;
3156         }
3157     }
3158 #endif
3159     return fd;
3160 }
3161 
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the target type flags to host values,
 * restricts netlink sockets to the protocols we can translate, and
 * registers per-fd data translators for packet and netlink sockets.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols whose messages we know how to byte-swap
     * are allowed; anything else would hand the guest garbled data. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* For packet sockets the protocol is an ethertype; swap it between
     * target and host byte order (presumably both ends expect network
     * order here — TODO confirm against af_packet semantics). */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Apply TARGET_SOCK_NONBLOCK via fcntl on hosts without
         * SOCK_NONBLOCK; may close the fd and return an error. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: protocol was validated above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3215 
3216 /* do_bind() Must return target values and target errnos. */
3217 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3218                         socklen_t addrlen)
3219 {
3220     void *addr;
3221     abi_long ret;
3222 
3223     if ((int)addrlen < 0) {
3224         return -TARGET_EINVAL;
3225     }
3226 
3227     addr = alloca(addrlen+1);
3228 
3229     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3230     if (ret)
3231         return ret;
3232 
3233     return get_errno(bind(sockfd, addr, addrlen));
3234 }
3235 
3236 /* do_connect() Must return target values and target errnos. */
3237 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3238                            socklen_t addrlen)
3239 {
3240     void *addr;
3241     abi_long ret;
3242 
3243     if ((int)addrlen < 0) {
3244         return -TARGET_EINVAL;
3245     }
3246 
3247     addr = alloca(addrlen+1);
3248 
3249     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3250     if (ret)
3251         return ret;
3252 
3253     return get_errno(safe_connect(sockfd, addr, addrlen));
3254 }
3255 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Shared worker for sendmsg/recvmsg emulation.  'msgp' is the guest
 * msghdr already locked by the caller; 'send' selects the direction.
 * Converts address, iovec and control data between guest and host
 * layouts around the actual safe_sendmsg/safe_recvmsg call.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        /* NOTE(review): msg_namelen is guest-controlled and feeds alloca()
         * unchecked — presumably bounded in practice; verify. */
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest's control length: host cmsg headers may be larger
     * than the target's, so the converted data needs extra room. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec reports failure via errno; errno==0 means a
         * legitimately empty iovec. */
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate payload in a scratch copy so the guest's locked
             * buffer is not modified.  Only the first iov entry is
             * translated here. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 is the deliberate bad-name sentinel set
                 * above; don't try to copy it back. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                /* On success report the number of bytes received. */
                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3368 
3369 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3370                                int flags, int send)
3371 {
3372     abi_long ret;
3373     struct target_msghdr *msgp;
3374 
3375     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3376                           msgp,
3377                           target_msg,
3378                           send ? 1 : 0)) {
3379         return -TARGET_EFAULT;
3380     }
3381     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3382     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3383     return ret;
3384 }
3385 
3386 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3387  * so it might not have this *mmsg-specific flag either.
3388  */
3389 #ifndef MSG_WAITFORONE
3390 #define MSG_WAITFORONE 0x10000
3391 #endif
3392 
3393 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3394                                 unsigned int vlen, unsigned int flags,
3395                                 int send)
3396 {
3397     struct target_mmsghdr *mmsgp;
3398     abi_long ret = 0;
3399     int i;
3400 
3401     if (vlen > UIO_MAXIOV) {
3402         vlen = UIO_MAXIOV;
3403     }
3404 
3405     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3406     if (!mmsgp) {
3407         return -TARGET_EFAULT;
3408     }
3409 
3410     for (i = 0; i < vlen; i++) {
3411         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3412         if (is_error(ret)) {
3413             break;
3414         }
3415         mmsgp[i].msg_len = tswap32(ret);
3416         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3417         if (flags & MSG_WAITFORONE) {
3418             flags |= MSG_DONTWAIT;
3419         }
3420     }
3421 
3422     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3423 
3424     /* Return number of datagrams sent if we sent any at all;
3425      * otherwise return the error.
3426      */
3427     if (i) {
3428         return i;
3429     }
3430     return ret;
3431 }
3432 
3433 /* do_accept4() Must return target values and target errnos. */
3434 static abi_long do_accept4(int fd, abi_ulong target_addr,
3435                            abi_ulong target_addrlen_addr, int flags)
3436 {
3437     socklen_t addrlen, ret_addrlen;
3438     void *addr;
3439     abi_long ret;
3440     int host_flags;
3441 
3442     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3443         return -TARGET_EINVAL;
3444     }
3445 
3446     host_flags = 0;
3447     if (flags & TARGET_SOCK_NONBLOCK) {
3448         host_flags |= SOCK_NONBLOCK;
3449     }
3450     if (flags & TARGET_SOCK_CLOEXEC) {
3451         host_flags |= SOCK_CLOEXEC;
3452     }
3453 
3454     if (target_addr == 0) {
3455         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3456     }
3457 
3458     /* linux returns EFAULT if addrlen pointer is invalid */
3459     if (get_user_u32(addrlen, target_addrlen_addr))
3460         return -TARGET_EFAULT;
3461 
3462     if ((int)addrlen < 0) {
3463         return -TARGET_EINVAL;
3464     }
3465 
3466     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3467         return -TARGET_EFAULT;
3468     }
3469 
3470     addr = alloca(addrlen);
3471 
3472     ret_addrlen = addrlen;
3473     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3474     if (!is_error(ret)) {
3475         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3476         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3477             ret = -TARGET_EFAULT;
3478         }
3479     }
3480     return ret;
3481 }
3482 
3483 /* do_getpeername() Must return target values and target errnos. */
3484 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3485                                abi_ulong target_addrlen_addr)
3486 {
3487     socklen_t addrlen, ret_addrlen;
3488     void *addr;
3489     abi_long ret;
3490 
3491     if (get_user_u32(addrlen, target_addrlen_addr))
3492         return -TARGET_EFAULT;
3493 
3494     if ((int)addrlen < 0) {
3495         return -TARGET_EINVAL;
3496     }
3497 
3498     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3499         return -TARGET_EFAULT;
3500     }
3501 
3502     addr = alloca(addrlen);
3503 
3504     ret_addrlen = addrlen;
3505     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3506     if (!is_error(ret)) {
3507         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3508         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3509             ret = -TARGET_EFAULT;
3510         }
3511     }
3512     return ret;
3513 }
3514 
3515 /* do_getsockname() Must return target values and target errnos. */
3516 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3517                                abi_ulong target_addrlen_addr)
3518 {
3519     socklen_t addrlen, ret_addrlen;
3520     void *addr;
3521     abi_long ret;
3522 
3523     if (get_user_u32(addrlen, target_addrlen_addr))
3524         return -TARGET_EFAULT;
3525 
3526     if ((int)addrlen < 0) {
3527         return -TARGET_EINVAL;
3528     }
3529 
3530     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3531         return -TARGET_EFAULT;
3532     }
3533 
3534     addr = alloca(addrlen);
3535 
3536     ret_addrlen = addrlen;
3537     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3538     if (!is_error(ret)) {
3539         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3540         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3541             ret = -TARGET_EFAULT;
3542         }
3543     }
3544     return ret;
3545 }
3546 
3547 /* do_socketpair() Must return target values and target errnos. */
3548 static abi_long do_socketpair(int domain, int type, int protocol,
3549                               abi_ulong target_tab_addr)
3550 {
3551     int tab[2];
3552     abi_long ret;
3553 
3554     target_to_host_sock_type(&type);
3555 
3556     ret = get_errno(socketpair(domain, type, protocol, tab));
3557     if (!is_error(ret)) {
3558         if (put_user_s32(tab[0], target_tab_addr)
3559             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3560             ret = -TARGET_EFAULT;
3561     }
3562     return ret;
3563 }
3564 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL; /* original locked buffer when fd needs translation */
    abi_long ret;

    /* A length that is negative when viewed as signed is rejected. */
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Map the guest payload for reading. */
    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /*
         * This fd has a registered data translator: run it on a private
         * copy so the guest's own buffer is left untouched.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* NOTE(review): +1 presumably leaves room for a NUL appended by
         * target_to_host_sockaddr (e.g. AF_UNIX paths) — confirm there. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Drop the translated copy and restore the locked guest buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3608 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A NULL guest buffer is passed through to the host syscall as NULL. */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        /* Read the caller's buffer length; EFAULT if the pointer is bad. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Let this fd's translator rewrite the received payload. */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the guest-supplied buffer length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: unlock marking len bytes of the guest buffer dirty. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: unlock without writing anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3669 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
/*
 * Demultiplex the legacy socketcall(2) syscall: @num selects the socket
 * operation and @vptr points at its packed abi_long argument vector in
 * guest memory.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3762 
#define N_SHM_REGIONS	32

/*
 * Bookkeeping for guest SysV shared-memory attaches.
 * NOTE(review): filled in and consulted by the shmat/shmdt emulation
 * elsewhere in this file — confirm there for exact usage.
 */
static struct shm_region {
    abi_ulong start;  /* guest virtual address of the attached segment */
    abi_ulong size;   /* segment size in bytes */
    bool in_use;      /* slot occupancy flag */
} shm_regions[N_SHM_REGIONS];
3770 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* padding (asm-generic 32-bit layout) */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;              /* padding (asm-generic 32-bit layout) */
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3789 
/*
 * Convert a guest ipc_perm to host layout.
 *
 * @target_addr points at a target_semid64_ds whose leading sem_perm
 * member is read.  Returns 0 on success, -TARGET_EFAULT if the guest
 * structure cannot be mapped.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* Some targets declare mode/seq wider than the generic 16-bit ABI. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3817 
/*
 * Convert a host ipc_perm into the guest's layout.
 *
 * Mirror of target_to_host_ipc_perm(): writes the sem_perm member of the
 * target_semid64_ds at @target_addr.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* Some targets declare mode/seq wider than the generic 16-bit ABI. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3845 
/*
 * Convert a guest semid64_ds to a host semid_ds.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest pointer.
 * NOTE(review): the early return after target_to_host_ipc_perm() skips
 * the unlock_user_struct — confirm whether that is intentional.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* The permissions live at the start of the same guest structure. */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3861 
/*
 * Convert a host semid_ds back into the guest's semid64_ds at
 * @target_addr.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* The permissions live at the start of the same guest structure. */
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3877 
/* Guest layout of struct seminfo as returned by semctl(IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3890 
/*
 * Copy a host struct seminfo out to the guest's target_seminfo at
 * @target_addr.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3910 
/* Host-side argument union for semctl(2); the libc does not define it. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of the same union: the pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3924 
/*
 * Read a guest semaphore-value array (for SETALL) into a freshly
 * allocated host array.
 *
 * Queries the set size with IPC_STAT first, then copies that many
 * unsigned shorts from @target_addr.  On success *host_array is owned by
 * the caller (do_semctl later releases it via host_to_target_semarray).
 * Returns 0, or a target errno on failure.
 */
static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
                                               abi_ulong target_addr)
{
    int nsems;
    unsigned short *array;
    union semun semun;
    struct semid_ds semid_ds;
    int i, ret;

    semun.buf = &semid_ds;

    /* Ask the kernel how many semaphores are in the set. */
    ret = semctl(semid, 0, IPC_STAT, semun);
    if (ret == -1)
        return get_errno(ret);

    nsems = semid_ds.sem_nsems;

    *host_array = g_try_new(unsigned short, nsems);
    if (!*host_array) {
        return -TARGET_ENOMEM;
    }
    array = lock_user(VERIFY_READ, target_addr,
                      nsems*sizeof(unsigned short), 1);
    if (!array) {
        g_free(*host_array);
        return -TARGET_EFAULT;
    }

    for(i=0; i<nsems; i++) {
        __get_user((*host_array)[i], &array[i]);
    }
    unlock_user(array, target_addr, 0);

    return 0;
}
3960 
3961 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3962                                                unsigned short **host_array)
3963 {
3964     int nsems;
3965     unsigned short *array;
3966     union semun semun;
3967     struct semid_ds semid_ds;
3968     int i, ret;
3969 
3970     semun.buf = &semid_ds;
3971 
3972     ret = semctl(semid, 0, IPC_STAT, semun);
3973     if (ret == -1)
3974         return get_errno(ret);
3975 
3976     nsems = semid_ds.sem_nsems;
3977 
3978     array = lock_user(VERIFY_WRITE, target_addr,
3979                       nsems*sizeof(unsigned short), 0);
3980     if (!array)
3981         return -TARGET_EFAULT;
3982 
3983     for(i=0; i<nsems; i++) {
3984         __put_user((*host_array)[i], &array[i]);
3985     }
3986     g_free(*host_array);
3987     unlock_user(array, target_addr, 1);
3988 
3989     return 0;
3990 }
3991 
3992 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3993                                  abi_ulong target_arg)
3994 {
3995     union target_semun target_su = { .buf = target_arg };
3996     union semun arg;
3997     struct semid_ds dsarg;
3998     unsigned short *array = NULL;
3999     struct seminfo seminfo;
4000     abi_long ret = -TARGET_EINVAL;
4001     abi_long err;
4002     cmd &= 0xff;
4003 
4004     switch( cmd ) {
4005 	case GETVAL:
4006 	case SETVAL:
4007             /* In 64 bit cross-endian situations, we will erroneously pick up
4008              * the wrong half of the union for the "val" element.  To rectify
4009              * this, the entire 8-byte structure is byteswapped, followed by
4010 	     * a swap of the 4 byte val field. In other cases, the data is
4011 	     * already in proper host byte order. */
4012 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4013 		target_su.buf = tswapal(target_su.buf);
4014 		arg.val = tswap32(target_su.val);
4015 	    } else {
4016 		arg.val = target_su.val;
4017 	    }
4018             ret = get_errno(semctl(semid, semnum, cmd, arg));
4019             break;
4020 	case GETALL:
4021 	case SETALL:
4022             err = target_to_host_semarray(semid, &array, target_su.array);
4023             if (err)
4024                 return err;
4025             arg.array = array;
4026             ret = get_errno(semctl(semid, semnum, cmd, arg));
4027             err = host_to_target_semarray(semid, target_su.array, &array);
4028             if (err)
4029                 return err;
4030             break;
4031 	case IPC_STAT:
4032 	case IPC_SET:
4033 	case SEM_STAT:
4034             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4035             if (err)
4036                 return err;
4037             arg.buf = &dsarg;
4038             ret = get_errno(semctl(semid, semnum, cmd, arg));
4039             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4040             if (err)
4041                 return err;
4042             break;
4043 	case IPC_INFO:
4044 	case SEM_INFO:
4045             arg.__buf = &seminfo;
4046             ret = get_errno(semctl(semid, semnum, cmd, arg));
4047             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4048             if (err)
4049                 return err;
4050             break;
4051 	case IPC_RMID:
4052 	case GETPID:
4053 	case GETNCNT:
4054 	case GETZCNT:
4055             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4056             break;
4057     }
4058 
4059     return ret;
4060 }
4061 
/* Guest layout of one semop(2) operation (struct sembuf). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index within the set */
    short sem_op;            /* operation: add, subtract, or wait-for-zero */
    short sem_flg;           /* flags (e.g. IPC_NOWAIT, SEM_UNDO) */
};
4067 
/*
 * Copy @nsops guest sembuf entries from @target_addr into the caller's
 * host array.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
                                             abi_ulong target_addr,
                                             unsigned nsops)
{
    struct target_sembuf *target_sembuf;
    int i;

    target_sembuf = lock_user(VERIFY_READ, target_addr,
                              nsops*sizeof(struct target_sembuf), 1);
    if (!target_sembuf)
        return -TARGET_EFAULT;

    for(i=0; i<nsops; i++) {
        __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
        __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
        __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
    }

    unlock_user(target_sembuf, target_addr, 0);

    return 0;
}
4090 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop() for the guest.
 *
 * @ptr is the guest address of an array of @nsops target_sembuf entries.
 * @timeout, if non-zero, is the guest address of a timespec; @time64
 * selects the 64-bit timespec layout.  Returns the syscall result or a
 * target errno.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Enforce the target's SEMOPM limit on operations per call. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Hosts without a direct semtimedop syscall go through sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4153 
/* Guest layout of struct msqid64_ds (asm-generic style). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* padding (32-bit ABI layout) */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;              /* padding (32-bit ABI layout) */
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;              /* padding (32-bit ABI layout) */
#endif
    abi_ulong __msg_cbytes;           /* current bytes on queue */
    abi_ulong msg_qnum;               /* messages currently queued */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4177 
/*
 * Convert a guest msqid_ds to host layout.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest pointer.
 */
static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
                                               abi_ulong target_addr)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
        return -TARGET_EFAULT;
    /* The permissions live at the start of the same guest structure. */
    if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
        return -TARGET_EFAULT;
    host_md->msg_stime = tswapal(target_md->msg_stime);
    host_md->msg_rtime = tswapal(target_md->msg_rtime);
    host_md->msg_ctime = tswapal(target_md->msg_ctime);
    host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
    host_md->msg_qnum = tswapal(target_md->msg_qnum);
    host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
    host_md->msg_lspid = tswapal(target_md->msg_lspid);
    host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 0);
    return 0;
}
4198 
/*
 * Convert a host msqid_ds back into the guest's layout at @target_addr.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
                                               struct msqid_ds *host_md)
{
    struct target_msqid_ds *target_md;

    if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
        return -TARGET_EFAULT;
    /* The permissions live at the start of the same guest structure. */
    if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
        return -TARGET_EFAULT;
    target_md->msg_stime = tswapal(host_md->msg_stime);
    target_md->msg_rtime = tswapal(host_md->msg_rtime);
    target_md->msg_ctime = tswapal(host_md->msg_ctime);
    target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
    target_md->msg_qnum = tswapal(host_md->msg_qnum);
    target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
    target_md->msg_lspid = tswapal(host_md->msg_lspid);
    target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
    unlock_user_struct(target_md, target_addr, 1);
    return 0;
}
4219 
/* Guest layout of struct msginfo as returned by msgctl(IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4230 
/*
 * Copy a host struct msginfo out to the guest's target_msginfo at
 * @target_addr.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
4248 
4249 static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
4250 {
4251     struct msqid_ds dsarg;
4252     struct msginfo msginfo;
4253     abi_long ret = -TARGET_EINVAL;
4254 
4255     cmd &= 0xff;
4256 
4257     switch (cmd) {
4258     case IPC_STAT:
4259     case IPC_SET:
4260     case MSG_STAT:
4261         if (target_to_host_msqid_ds(&dsarg,ptr))
4262             return -TARGET_EFAULT;
4263         ret = get_errno(msgctl(msgid, cmd, &dsarg));
4264         if (host_to_target_msqid_ds(ptr,&dsarg))
4265             return -TARGET_EFAULT;
4266         break;
4267     case IPC_RMID:
4268         ret = get_errno(msgctl(msgid, cmd, NULL));
4269         break;
4270     case IPC_INFO:
4271     case MSG_INFO:
4272         ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
4273         if (host_to_target_msginfo(ptr, &msginfo))
4274             return -TARGET_EFAULT;
4275         break;
4276     }
4277 
4278     return ret;
4279 }
4280 
/* Guest layout of struct msgbuf: type header plus variable-length text. */
struct target_msgbuf {
    abi_long mtype;   /* message type (must be > 0 for msgsnd) */
    char	mtext[1]; /* payload; actual length given by msgsz */
};
4285 
/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf and send
 * it.  @msgp is the guest address of a target_msgbuf; @msgsz is the
 * length of its mtext payload (the mtype header is not included).
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a long mtype header followed by msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Hosts without a direct msgsnd syscall go through sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4326 
#ifdef __NR_ipc
/*
 * Build the trailing sys_ipc arguments for IPCOP_msgrcv; most hosts pack
 * the msgbuf pointer and msgtyp into an anonymous two-element array.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4340 
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and byteswapped mtype back to the guest's target_msgbuf at @msgp.
 * Returns the number of payload bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: a long mtype header followed by msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Hosts without a direct msgrcv syscall go through sys_ipc. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* mtext follows the abi_ulong-sized mtype in the guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): mtype is written back even when the receive failed,
     * in which case host_mb->mtype is uninitialized — confirm intent. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4392 
4393 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4394                                                abi_ulong target_addr)
4395 {
4396     struct target_shmid_ds *target_sd;
4397 
4398     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4399         return -TARGET_EFAULT;
4400     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4401         return -TARGET_EFAULT;
4402     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4403     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4404     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4405     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4406     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4407     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4408     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4409     unlock_user_struct(target_sd, target_addr, 0);
4410     return 0;
4411 }
4412 
4413 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4414                                                struct shmid_ds *host_sd)
4415 {
4416     struct target_shmid_ds *target_sd;
4417 
4418     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4419         return -TARGET_EFAULT;
4420     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4421         return -TARGET_EFAULT;
4422     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4423     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4424     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4425     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4426     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4427     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4428     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4429     unlock_user_struct(target_sd, target_addr, 1);
4430     return 0;
4431 }
4432 
/* Guest-ABI layout of the shminfo structure returned by
 * shmctl(IPC_INFO); field names mirror the host's struct shminfo
 * (see shmctl(2)), but all fields are abi_ulong sized.
 */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4440 
4441 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4442                                               struct shminfo *host_shminfo)
4443 {
4444     struct target_shminfo *target_shminfo;
4445     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4446         return -TARGET_EFAULT;
4447     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4448     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4449     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4450     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4451     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4452     unlock_user_struct(target_shminfo, target_addr, 1);
4453     return 0;
4454 }
4455 
/* Guest-ABI layout of the shm_info structure returned by
 * shmctl(SHM_INFO); field names mirror the host's struct shm_info
 * (see shmctl(2)).
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4464 
4465 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4466                                                struct shm_info *host_shm_info)
4467 {
4468     struct target_shm_info *target_shm_info;
4469     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4470         return -TARGET_EFAULT;
4471     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4472     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4473     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4474     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4475     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4476     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4477     unlock_user_struct(target_shm_info, target_addr, 1);
4478     return 0;
4479 }
4480 
4481 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4482 {
4483     struct shmid_ds dsarg;
4484     struct shminfo shminfo;
4485     struct shm_info shm_info;
4486     abi_long ret = -TARGET_EINVAL;
4487 
4488     cmd &= 0xff;
4489 
4490     switch(cmd) {
4491     case IPC_STAT:
4492     case IPC_SET:
4493     case SHM_STAT:
4494         if (target_to_host_shmid_ds(&dsarg, buf))
4495             return -TARGET_EFAULT;
4496         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4497         if (host_to_target_shmid_ds(buf, &dsarg))
4498             return -TARGET_EFAULT;
4499         break;
4500     case IPC_INFO:
4501         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4502         if (host_to_target_shminfo(buf, &shminfo))
4503             return -TARGET_EFAULT;
4504         break;
4505     case SHM_INFO:
4506         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4507         if (host_to_target_shm_info(buf, &shm_info))
4508             return -TARGET_EFAULT;
4509         break;
4510     case IPC_RMID:
4511     case SHM_LOCK:
4512     case SHM_UNLOCK:
4513         ret = get_errno(shmctl(shmid, cmd, NULL));
4514         break;
4515     }
4516 
4517     return ret;
4518 }
4519 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default SHMLBA for the guest: one target page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4539 
/*
 * Emulate shmat(2): attach the SysV shared memory segment shmid at
 * guest address shmaddr (or pick a free range when shmaddr is 0).
 * On success returns the guest attach address; on failure returns a
 * negative target errno.  Records the attachment in shm_regions[] so
 * do_shmdt() can later clear the page flags.
 */
static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
                          abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment: round down with SHM_RND,
     * otherwise reject a misaligned address (matches kernel behavior).
     */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: replace the placeholder found by mmap_find_vma. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        /* host_raddr is -1 here, so this returns -errno from shmat. */
        return get_errno((intptr_t)host_raddr);
    }
    raddr = h2g((uintptr_t)host_raddr);

    /* Mark the attached range valid/readable (and writable unless the
     * segment was attached read-only).
     */
    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Remember start/size in the first free bookkeeping slot so that
     * do_shmdt() can undo the page flags later.
     */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4623 
4624 static inline abi_long do_shmdt(abi_ulong shmaddr)
4625 {
4626     int i;
4627     abi_long rv;
4628 
4629     /* shmdt pointers are always untagged */
4630 
4631     mmap_lock();
4632 
4633     for (i = 0; i < N_SHM_REGIONS; ++i) {
4634         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4635             shm_regions[i].in_use = false;
4636             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4637             break;
4638         }
4639     }
4640     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4641 
4642     mmap_unlock();
4643 
4644     return rv;
4645 }
4646 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy sys_ipc syscall: the low 16 bits of 'call'
 * select the IPC operation and the high 16 bits carry an ABI version
 * used by a few operations (msgrcv, shmat).
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            /* Old-style msgrcv: ptr points to a kludge structure
             * bundling the message buffer address and type.
             */
            {
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* iBCS2 emulator entry point: not supported. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4767 
/* kernel structure types definitions */

/* First expansion of syscall_types.h: build an enum of STRUCT_<name>
 * identifiers, one per described structure, terminated by STRUCT_MAX.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second expansion: emit a thunk field-description array for each
 * regular structure; STRUCT_SPECIAL entries expand to nothing here
 * (presumably converted by hand-written code elsewhere in the file).
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed on-stack buffer used for ioctl argument conversion. */
#define MAX_STRUCT_SIZE 4096
4786 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Handle the FS_IOC_FIEMAP ioctl, converting the struct fiemap header
 * and its trailing fiemap_extent array between guest and host layouts.
 * Returns the ioctl result or a negative target errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the guest struct fiemap header into buf_temp. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so the output size math cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4875 
4876 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4877                                 int fd, int cmd, abi_long arg)
4878 {
4879     const argtype *arg_type = ie->arg_type;
4880     int target_size;
4881     void *argptr;
4882     int ret;
4883     struct ifconf *host_ifconf;
4884     uint32_t outbufsz;
4885     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4886     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4887     int target_ifreq_size;
4888     int nb_ifreq;
4889     int free_buf = 0;
4890     int i;
4891     int target_ifc_len;
4892     abi_long target_ifc_buf;
4893     int host_ifc_len;
4894     char *host_ifc_buf;
4895 
4896     assert(arg_type[0] == TYPE_PTR);
4897     assert(ie->access == IOC_RW);
4898 
4899     arg_type++;
4900     target_size = thunk_type_size(arg_type, 0);
4901 
4902     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4903     if (!argptr)
4904         return -TARGET_EFAULT;
4905     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4906     unlock_user(argptr, arg, 0);
4907 
4908     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4909     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4910     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4911 
4912     if (target_ifc_buf != 0) {
4913         target_ifc_len = host_ifconf->ifc_len;
4914         nb_ifreq = target_ifc_len / target_ifreq_size;
4915         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4916 
4917         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4918         if (outbufsz > MAX_STRUCT_SIZE) {
4919             /*
4920              * We can't fit all the extents into the fixed size buffer.
4921              * Allocate one that is large enough and use it instead.
4922              */
4923             host_ifconf = g_try_malloc(outbufsz);
4924             if (!host_ifconf) {
4925                 return -TARGET_ENOMEM;
4926             }
4927             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4928             free_buf = 1;
4929         }
4930         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4931 
4932         host_ifconf->ifc_len = host_ifc_len;
4933     } else {
4934       host_ifc_buf = NULL;
4935     }
4936     host_ifconf->ifc_buf = host_ifc_buf;
4937 
4938     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4939     if (!is_error(ret)) {
4940 	/* convert host ifc_len to target ifc_len */
4941 
4942         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4943         target_ifc_len = nb_ifreq * target_ifreq_size;
4944         host_ifconf->ifc_len = target_ifc_len;
4945 
4946 	/* restore target ifc_buf */
4947 
4948         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4949 
4950 	/* copy struct ifconf to target user */
4951 
4952         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4953         if (!argptr)
4954             return -TARGET_EFAULT;
4955         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4956         unlock_user(argptr, arg, target_size);
4957 
4958         if (target_ifc_buf != 0) {
4959             /* copy ifreq[] to target user */
4960             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4961             for (i = 0; i < nb_ifreq ; i++) {
4962                 thunk_convert(argptr + i * target_ifreq_size,
4963                               host_ifc_buf + i * sizeof(struct ifreq),
4964                               ifreq_arg_type, THUNK_TARGET);
4965             }
4966             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4967         }
4968     }
4969 
4970     if (free_buf) {
4971         g_free(host_ifconf);
4972     }
4973 
4974     return ret;
4975 }
4976 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/* Per-URB bookkeeping for USBDEVFS_SUBMITURB/REAPURB emulation.
 * target_urb_adr must stay the first field: it is the 64-bit hash
 * key used by the urb hashtable (g_int64_hash on the struct pointer).
 */
struct live_urb {
    uint64_t target_urb_adr;    /* guest address of the urb struct */
    uint64_t target_buf_adr;    /* guest address of the data buffer */
    char *target_buf_ptr;       /* host pointer from lock_user(), while locked */
    struct usbdevfs_urb host_urb;   /* host-layout urb passed to the kernel */
};
4987 
4988 static GHashTable *usbdevfs_urb_hashtable(void)
4989 {
4990     static GHashTable *urb_hashtable;
4991 
4992     if (!urb_hashtable) {
4993         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4994     }
4995     return urb_hashtable;
4996 }
4997 
/* Register a live URB; the struct doubles as both key and value since
 * its leading field is the 64-bit guest URB address.
 */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
5003 
5004 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5005 {
5006     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5007     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5008 }
5009 
/* Drop a live URB from the table (does not free the URB itself). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
5015 
/*
 * Handle USBDEVFS_REAPURB(NDELAY): reap a completed URB from the
 * kernel, copy its results back to the guest urb struct and data
 * buffer, write the guest URB address into *arg, and free the
 * live_urb bookkeeping allocated at submit time.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host urb pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* container-of: recover the live_urb wrapping the reaped host_urb. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the data buffer locked at submit time, copying any data
     * the kernel wrote back out to the guest.
     */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5075 
5076 static abi_long
5077 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5078                              uint8_t *buf_temp __attribute__((unused)),
5079                              int fd, int cmd, abi_long arg)
5080 {
5081     struct live_urb *lurb;
5082 
5083     /* map target address back to host URB with metadata. */
5084     lurb = urb_hashtable_lookup(arg);
5085     if (!lurb) {
5086         return -TARGET_EFAULT;
5087     }
5088     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5089 }
5090 
/*
 * Handle USBDEVFS_SUBMITURB: build a host-side copy of the guest urb
 * (plus bookkeeping metadata), lock the guest data buffer for the
 * duration of the transfer, and submit it to the kernel.  On success
 * the live_urb stays registered until REAPURB frees it.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    /* Convert the guest urb struct into lurb->host_urb. */
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest addresses so REAPURB can write results back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: drop the buffer lock (no write-back) and
         * discard the bookkeeping.
         */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5151 #endif /* CONFIG_USBFS */
5152 
5153 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5154                             int cmd, abi_long arg)
5155 {
5156     void *argptr;
5157     struct dm_ioctl *host_dm;
5158     abi_long guest_data;
5159     uint32_t guest_data_size;
5160     int target_size;
5161     const argtype *arg_type = ie->arg_type;
5162     abi_long ret;
5163     void *big_buf = NULL;
5164     char *host_data;
5165 
5166     arg_type++;
5167     target_size = thunk_type_size(arg_type, 0);
5168     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5169     if (!argptr) {
5170         ret = -TARGET_EFAULT;
5171         goto out;
5172     }
5173     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5174     unlock_user(argptr, arg, 0);
5175 
5176     /* buf_temp is too small, so fetch things into a bigger buffer */
5177     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5178     memcpy(big_buf, buf_temp, target_size);
5179     buf_temp = big_buf;
5180     host_dm = big_buf;
5181 
5182     guest_data = arg + host_dm->data_start;
5183     if ((guest_data - arg) < 0) {
5184         ret = -TARGET_EINVAL;
5185         goto out;
5186     }
5187     guest_data_size = host_dm->data_size - host_dm->data_start;
5188     host_data = (char*)host_dm + host_dm->data_start;
5189 
5190     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5191     if (!argptr) {
5192         ret = -TARGET_EFAULT;
5193         goto out;
5194     }
5195 
5196     switch (ie->host_cmd) {
5197     case DM_REMOVE_ALL:
5198     case DM_LIST_DEVICES:
5199     case DM_DEV_CREATE:
5200     case DM_DEV_REMOVE:
5201     case DM_DEV_SUSPEND:
5202     case DM_DEV_STATUS:
5203     case DM_DEV_WAIT:
5204     case DM_TABLE_STATUS:
5205     case DM_TABLE_CLEAR:
5206     case DM_TABLE_DEPS:
5207     case DM_LIST_VERSIONS:
5208         /* no input data */
5209         break;
5210     case DM_DEV_RENAME:
5211     case DM_DEV_SET_GEOMETRY:
5212         /* data contains only strings */
5213         memcpy(host_data, argptr, guest_data_size);
5214         break;
5215     case DM_TARGET_MSG:
5216         memcpy(host_data, argptr, guest_data_size);
5217         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5218         break;
5219     case DM_TABLE_LOAD:
5220     {
5221         void *gspec = argptr;
5222         void *cur_data = host_data;
5223         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5224         int spec_size = thunk_type_size(arg_type, 0);
5225         int i;
5226 
5227         for (i = 0; i < host_dm->target_count; i++) {
5228             struct dm_target_spec *spec = cur_data;
5229             uint32_t next;
5230             int slen;
5231 
5232             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5233             slen = strlen((char*)gspec + spec_size) + 1;
5234             next = spec->next;
5235             spec->next = sizeof(*spec) + slen;
5236             strcpy((char*)&spec[1], gspec + spec_size);
5237             gspec += next;
5238             cur_data += spec->next;
5239         }
5240         break;
5241     }
5242     default:
5243         ret = -TARGET_EINVAL;
5244         unlock_user(argptr, guest_data, 0);
5245         goto out;
5246     }
5247     unlock_user(argptr, guest_data, 0);
5248 
5249     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5250     if (!is_error(ret)) {
5251         guest_data = arg + host_dm->data_start;
5252         guest_data_size = host_dm->data_size - host_dm->data_start;
5253         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5254         switch (ie->host_cmd) {
5255         case DM_REMOVE_ALL:
5256         case DM_DEV_CREATE:
5257         case DM_DEV_REMOVE:
5258         case DM_DEV_RENAME:
5259         case DM_DEV_SUSPEND:
5260         case DM_DEV_STATUS:
5261         case DM_TABLE_LOAD:
5262         case DM_TABLE_CLEAR:
5263         case DM_TARGET_MSG:
5264         case DM_DEV_SET_GEOMETRY:
5265             /* no return data */
5266             break;
5267         case DM_LIST_DEVICES:
5268         {
5269             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5270             uint32_t remaining_data = guest_data_size;
5271             void *cur_data = argptr;
5272             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5273             int nl_size = 12; /* can't use thunk_size due to alignment */
5274 
5275             while (1) {
5276                 uint32_t next = nl->next;
5277                 if (next) {
5278                     nl->next = nl_size + (strlen(nl->name) + 1);
5279                 }
5280                 if (remaining_data < nl->next) {
5281                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5282                     break;
5283                 }
5284                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5285                 strcpy(cur_data + nl_size, nl->name);
5286                 cur_data += nl->next;
5287                 remaining_data -= nl->next;
5288                 if (!next) {
5289                     break;
5290                 }
5291                 nl = (void*)nl + next;
5292             }
5293             break;
5294         }
5295         case DM_DEV_WAIT:
5296         case DM_TABLE_STATUS:
5297         {
5298             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5299             void *cur_data = argptr;
5300             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5301             int spec_size = thunk_type_size(arg_type, 0);
5302             int i;
5303 
5304             for (i = 0; i < host_dm->target_count; i++) {
5305                 uint32_t next = spec->next;
5306                 int slen = strlen((char*)&spec[1]) + 1;
5307                 spec->next = (cur_data - argptr) + spec_size + slen;
5308                 if (guest_data_size < spec->next) {
5309                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5310                     break;
5311                 }
5312                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5313                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5314                 cur_data = argptr + spec->next;
5315                 spec = (void*)host_dm + host_dm->data_start + next;
5316             }
5317             break;
5318         }
5319         case DM_TABLE_DEPS:
5320         {
5321             void *hdata = (void*)host_dm + host_dm->data_start;
5322             int count = *(uint32_t*)hdata;
5323             uint64_t *hdev = hdata + 8;
5324             uint64_t *gdev = argptr + 8;
5325             int i;
5326 
5327             *(uint32_t*)argptr = tswap32(count);
5328             for (i = 0; i < count; i++) {
5329                 *gdev = tswap64(*hdev);
5330                 gdev++;
5331                 hdev++;
5332             }
5333             break;
5334         }
5335         case DM_LIST_VERSIONS:
5336         {
5337             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5338             uint32_t remaining_data = guest_data_size;
5339             void *cur_data = argptr;
5340             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5341             int vers_size = thunk_type_size(arg_type, 0);
5342 
5343             while (1) {
5344                 uint32_t next = vers->next;
5345                 if (next) {
5346                     vers->next = vers_size + (strlen(vers->name) + 1);
5347                 }
5348                 if (remaining_data < vers->next) {
5349                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5350                     break;
5351                 }
5352                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5353                 strcpy(cur_data + vers_size, vers->name);
5354                 cur_data += vers->next;
5355                 remaining_data -= vers->next;
5356                 if (!next) {
5357                     break;
5358                 }
5359                 vers = (void*)vers + next;
5360             }
5361             break;
5362         }
5363         default:
5364             unlock_user(argptr, guest_data, 0);
5365             ret = -TARGET_EINVAL;
5366             goto out;
5367         }
5368         unlock_user(argptr, guest_data, guest_data_size);
5369 
5370         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5371         if (!argptr) {
5372             ret = -TARGET_EFAULT;
5373             goto out;
5374         }
5375         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5376         unlock_user(argptr, arg, target_size);
5377     }
5378 out:
5379     g_free(big_buf);
5380     return ret;
5381 }
5382 
5383 static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5384                                int cmd, abi_long arg)
5385 {
5386     void *argptr;
5387     int target_size;
5388     const argtype *arg_type = ie->arg_type;
5389     const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
5390     abi_long ret;
5391 
5392     struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
5393     struct blkpg_partition host_part;
5394 
5395     /* Read and convert blkpg */
5396     arg_type++;
5397     target_size = thunk_type_size(arg_type, 0);
5398     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5399     if (!argptr) {
5400         ret = -TARGET_EFAULT;
5401         goto out;
5402     }
5403     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5404     unlock_user(argptr, arg, 0);
5405 
5406     switch (host_blkpg->op) {
5407     case BLKPG_ADD_PARTITION:
5408     case BLKPG_DEL_PARTITION:
5409         /* payload is struct blkpg_partition */
5410         break;
5411     default:
5412         /* Unknown opcode */
5413         ret = -TARGET_EINVAL;
5414         goto out;
5415     }
5416 
5417     /* Read and convert blkpg->data */
5418     arg = (abi_long)(uintptr_t)host_blkpg->data;
5419     target_size = thunk_type_size(part_arg_type, 0);
5420     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5421     if (!argptr) {
5422         ret = -TARGET_EFAULT;
5423         goto out;
5424     }
5425     thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
5426     unlock_user(argptr, arg, 0);
5427 
5428     /* Swizzle the data pointer to our local copy and call! */
5429     host_blkpg->data = &host_part;
5430     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));
5431 
5432 out:
5433     return ret;
5434 }
5435 
/*
 * Handle ioctls whose argument is a struct rtentry (routing-table
 * requests).  The generic thunk path cannot be used because rt_dev is a
 * char* into guest memory: the struct is converted field by field so the
 * rt_dev string can be locked into host memory for the ioctl and released
 * again afterwards.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    /* Only write-direction TYPE_PTR -> STRUCT_rtentry entries come here. */
    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: instead of thunk-converting the pointer value, lock
               the guest string (if any) and store the host pointer. */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must have seen the rt_dev field exactly once. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5501 
5502 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5503                                      int fd, int cmd, abi_long arg)
5504 {
5505     int sig = target_to_host_signal(arg);
5506     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5507 }
5508 
5509 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5510                                     int fd, int cmd, abi_long arg)
5511 {
5512     struct timeval tv;
5513     abi_long ret;
5514 
5515     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5516     if (is_error(ret)) {
5517         return ret;
5518     }
5519 
5520     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5521         if (copy_to_user_timeval(arg, &tv)) {
5522             return -TARGET_EFAULT;
5523         }
5524     } else {
5525         if (copy_to_user_timeval64(arg, &tv)) {
5526             return -TARGET_EFAULT;
5527         }
5528     }
5529 
5530     return ret;
5531 }
5532 
5533 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5534                                       int fd, int cmd, abi_long arg)
5535 {
5536     struct timespec ts;
5537     abi_long ret;
5538 
5539     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5540     if (is_error(ret)) {
5541         return ret;
5542     }
5543 
5544     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5545         if (host_to_target_timespec(arg, &ts)) {
5546             return -TARGET_EFAULT;
5547         }
5548     } else{
5549         if (host_to_target_timespec64(arg, &ts)) {
5550             return -TARGET_EFAULT;
5551         }
5552     }
5553 
5554     return ret;
5555 }
5556 
#ifdef TIOCGPTPEER
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    /* The argument is a set of open flags: translate them to host values. */
    return get_errno(safe_ioctl(fd, ie->host_cmd,
                                target_to_host_bitmask(arg, fcntl_flags_tbl)));
}
#endif
5565 
5566 #ifdef HAVE_DRM_H
5567 
5568 static void unlock_drm_version(struct drm_version *host_ver,
5569                                struct target_drm_version *target_ver,
5570                                bool copy)
5571 {
5572     unlock_user(host_ver->name, target_ver->name,
5573                                 copy ? host_ver->name_len : 0);
5574     unlock_user(host_ver->date, target_ver->date,
5575                                 copy ? host_ver->date_len : 0);
5576     unlock_user(host_ver->desc, target_ver->desc,
5577                                 copy ? host_ver->desc_len : 0);
5578 }
5579 
/*
 * Prepare a host struct drm_version for DRM_IOCTL_VERSION by locking the
 * guest buffers (name/date/desc) the request points at, so the host
 * kernel can write into them directly.  Returns 0 on success, -EFAULT if
 * any referenced buffer cannot be locked; on failure all three buffers
 * are released again.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        /*
         * NOTE(review): target_ver->name and target_ver->name_len are read
         * directly here (guest byte order), while name_len was also fetched
         * via __get_user above — confirm this is correct on cross-endian
         * hosts.  Same pattern for date and desc below.
         */
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Releases all three buffers; presumably unlock_user() tolerates the
       NULL host pointers of buffers that were never locked — verify. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5617 
/*
 * Copy the version numbers and string lengths the host kernel returned
 * back into the guest's struct drm_version, then release the locked
 * string buffers, copying their host-written contents to the guest.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5630 
5631 static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
5632                              int fd, int cmd, abi_long arg)
5633 {
5634     struct drm_version *ver;
5635     struct target_drm_version *target_ver;
5636     abi_long ret;
5637 
5638     switch (ie->host_cmd) {
5639     case DRM_IOCTL_VERSION:
5640         if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
5641             return -TARGET_EFAULT;
5642         }
5643         ver = (struct drm_version *)buf_temp;
5644         ret = target_to_host_drmversion(ver, target_ver);
5645         if (!is_error(ret)) {
5646             ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
5647             if (is_error(ret)) {
5648                 unlock_drm_version(ver, target_ver, false);
5649             } else {
5650                 host_to_target_drmversion(target_ver, ver);
5651             }
5652         }
5653         unlock_user_struct(target_ver, arg, 0);
5654         return ret;
5655     }
5656     return -TARGET_ENOSYS;
5657 }
5658 
5659 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5660                                            struct drm_i915_getparam *gparam,
5661                                            int fd, abi_long arg)
5662 {
5663     abi_long ret;
5664     int value;
5665     struct target_drm_i915_getparam *target_gparam;
5666 
5667     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5668         return -TARGET_EFAULT;
5669     }
5670 
5671     __get_user(gparam->param, &target_gparam->param);
5672     gparam->value = &value;
5673     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5674     put_user_s32(value, target_gparam->value);
5675 
5676     unlock_user_struct(target_gparam, arg, 0);
5677     return ret;
5678 }
5679 
5680 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5681                                   int fd, int cmd, abi_long arg)
5682 {
5683     switch (ie->host_cmd) {
5684     case DRM_IOCTL_I915_GETPARAM:
5685         return do_ioctl_drm_i915_getparam(ie,
5686                                           (struct drm_i915_getparam *)buf_temp,
5687                                           fd, arg);
5688     default:
5689         return -TARGET_ENOSYS;
5690     }
5691 }
5692 
5693 #endif
5694 
5695 static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
5696                                         int fd, int cmd, abi_long arg)
5697 {
5698     struct tun_filter *filter = (struct tun_filter *)buf_temp;
5699     struct tun_filter *target_filter;
5700     char *target_addr;
5701 
5702     assert(ie->access == IOC_W);
5703 
5704     target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
5705     if (!target_filter) {
5706         return -TARGET_EFAULT;
5707     }
5708     filter->flags = tswap16(target_filter->flags);
5709     filter->count = tswap16(target_filter->count);
5710     unlock_user(target_filter, arg, 0);
5711 
5712     if (filter->count) {
5713         if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
5714             MAX_STRUCT_SIZE) {
5715             return -TARGET_EFAULT;
5716         }
5717 
5718         target_addr = lock_user(VERIFY_READ,
5719                                 arg + offsetof(struct tun_filter, addr),
5720                                 filter->count * ETH_ALEN, 1);
5721         if (!target_addr) {
5722             return -TARGET_EFAULT;
5723         }
5724         memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
5725         unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
5726     }
5727 
5728     return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
5729 }
5730 
/*
 * Table of all supported ioctls, populated from ioctls.h.  Terminated by
 * an all-zero sentinel entry (see the lookup loop in do_ioctl()).
 */
IOCTLEntry ioctl_entries[] = {
/* Generic entry: argument described by an argtype list, no custom handler. */
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
/* Entry with a custom do_ioctl handler function (dofn). */
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
/* Recognized but unimplemented: host_cmd 0 makes do_ioctl() return ENOTTY. */
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5741 
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the ioctl table; the zero target_cmd sentinel
       entry terminates it. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Entry registered a custom handler (IOCTL_SPECIAL). */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    /* Generic path: convert the argument according to its argtype. */
    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: passed through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer argument: thunk-convert the pointed-to struct through
           buf_temp in the direction(s) given by ie->access. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes: call first, convert result out on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads: convert guest argument in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Both: convert in, call, convert back out on success. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
5833 
5834 static const bitmask_transtbl iflag_tbl[] = {
5835         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5836         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5837         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5838         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5839         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5840         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5841         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5842         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5843         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5844         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5845         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5846         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5847         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5848         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5849         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5850         { 0, 0, 0, 0 }
5851 };
5852 
5853 static const bitmask_transtbl oflag_tbl[] = {
5854 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5855 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5856 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5857 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5858 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5859 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5860 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5861 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5862 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5863 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5864 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5865 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5866 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5867 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5868 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5869 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5870 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5871 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5872 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5873 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5874 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5875 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5876 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5877 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5878 	{ 0, 0, 0, 0 }
5879 };
5880 
5881 static const bitmask_transtbl cflag_tbl[] = {
5882 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5883 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5884 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5885 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5886 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5887 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5888 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5889 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5890 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5891 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5892 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5893 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5894 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5895 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5896 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5897 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5898 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5899 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5900 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5901 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5902 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5903 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5904 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5905 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5906 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5907 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5908 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5909 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5910 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5911 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5912 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5913 	{ 0, 0, 0, 0 }
5914 };
5915 
5916 static const bitmask_transtbl lflag_tbl[] = {
5917   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5918   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5919   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5920   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5921   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5922   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5923   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5924   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5925   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5926   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5927   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5928   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5929   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5930   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5931   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5932   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5933   { 0, 0, 0, 0 }
5934 };
5935 
5936 static void target_to_host_termios (void *dst, const void *src)
5937 {
5938     struct host_termios *host = dst;
5939     const struct target_termios *target = src;
5940 
5941     host->c_iflag =
5942         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5943     host->c_oflag =
5944         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5945     host->c_cflag =
5946         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5947     host->c_lflag =
5948         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5949     host->c_line = target->c_line;
5950 
5951     memset(host->c_cc, 0, sizeof(host->c_cc));
5952     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5953     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5954     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5955     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5956     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5957     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5958     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5959     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5960     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5961     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5962     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5963     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5964     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5965     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5966     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5967     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5968     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5969 }
5970 
5971 static void host_to_target_termios (void *dst, const void *src)
5972 {
5973     struct target_termios *target = dst;
5974     const struct host_termios *host = src;
5975 
5976     target->c_iflag =
5977         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5978     target->c_oflag =
5979         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5980     target->c_cflag =
5981         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5982     target->c_lflag =
5983         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5984     target->c_line = host->c_line;
5985 
5986     memset(target->c_cc, 0, sizeof(target->c_cc));
5987     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5988     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5989     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5990     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5991     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5992     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5993     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5994     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5995     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5996     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
5997     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
5998     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
5999     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6000     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6001     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6002     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6003     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6004 }
6005 
/* Thunk descriptor for struct termios: conversion callbacks plus the
   target/host sizes and alignments used by the generic ioctl path. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
6012 
/* If the host does not provide these bits, they may be safely discarded. */
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

/* mmap() flags: target value <-> host value.  Entries with host fields of
   zero (e.g. MAP_STACK) are accepted from the guest but dropped. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
    { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
    { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
      MAP_TYPE, MAP_SHARED_VALIDATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
    { 0, 0, 0, 0 }
};
6052 
6053 /*
6054  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6055  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6056  */
6057 #if defined(TARGET_I386)
6058 
6059 /* NOTE: there is really one LDT for all the threads */
6060 static uint8_t *ldt_table;
6061 
/*
 * modify_ldt(func == 0): copy the emulated LDT (up to bytecount bytes)
 * into the guest buffer at ptr.  Returns the number of bytes copied,
 * 0 if no LDT has been allocated yet, or -TARGET_EFAULT if the guest
 * buffer cannot be locked.
 */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    /* Never copy more than the full table. */
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped?  */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
6080 
/* XXX: add locking support */
/*
 * modify_ldt(func == 1 or 0x11): install a single LDT descriptor from
 * the guest's struct user_desc (target_modify_ldt_ldt_s) at ptr.
 * 'oldmode' selects the legacy modify_ldt(1) semantics, in which the
 * 'useable' bit is not encoded into the descriptor.  The LDT backing
 * store is allocated lazily on first write.
 * Returns 0 on success or a -TARGET_* errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flag bits of struct user_desc. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Build the two 32-bit words of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6172 
6173 /* specific and weird i386 syscalls */
6174 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6175                               unsigned long bytecount)
6176 {
6177     abi_long ret;
6178 
6179     switch (func) {
6180     case 0:
6181         ret = read_ldt(ptr, bytecount);
6182         break;
6183     case 1:
6184         ret = write_ldt(env, ptr, bytecount, 1);
6185         break;
6186     case 0x11:
6187         ret = write_ldt(env, ptr, bytecount, 0);
6188         break;
6189     default:
6190         ret = -TARGET_ENOSYS;
6191         break;
6192     }
6193     return ret;
6194 }
6195 
6196 #if defined(TARGET_ABI32)
/*
 * set_thread_area: install a TLS descriptor into the GDT.  If the guest
 * passes entry_number == -1 we pick the first free TLS slot and write
 * the chosen index back into the guest structure, mirroring the kernel.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Find a free TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flag bits of struct user_desc. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Build the two 32-bit words of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6281 
/*
 * get_thread_area: read back a TLS descriptor from the GDT and decode
 * its two 32-bit words into the guest's struct user_desc at ptr.
 * The entry index must be within the TLS range.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode the descriptor bits (inverse of do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6328 
/* arch_prctl is not available to 32-bit (ABI32) guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6333 #else
6334 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6335 {
6336     abi_long ret = 0;
6337     abi_ulong val;
6338     int idx;
6339 
6340     switch(code) {
6341     case TARGET_ARCH_SET_GS:
6342     case TARGET_ARCH_SET_FS:
6343         if (code == TARGET_ARCH_SET_GS)
6344             idx = R_GS;
6345         else
6346             idx = R_FS;
6347         cpu_x86_load_seg(env, idx, 0);
6348         env->segs[idx].base = addr;
6349         break;
6350     case TARGET_ARCH_GET_GS:
6351     case TARGET_ARCH_GET_FS:
6352         if (code == TARGET_ARCH_GET_GS)
6353             idx = R_GS;
6354         else
6355             idx = R_FS;
6356         val = env->segs[idx].base;
6357         if (put_user(val, addr, abi_ulong))
6358             ret = -TARGET_EFAULT;
6359         break;
6360     default:
6361         ret = -TARGET_EINVAL;
6362         break;
6363     }
6364     return ret;
6365 }
#endif /* defined(TARGET_ABI32) */
6367 #endif /* defined(TARGET_I386) */
6368 
6369 /*
6370  * These constants are generic.  Supply any that are missing from the host.
6371  */
6372 #ifndef PR_SET_NAME
6373 # define PR_SET_NAME    15
6374 # define PR_GET_NAME    16
6375 #endif
6376 #ifndef PR_SET_FP_MODE
6377 # define PR_SET_FP_MODE 45
6378 # define PR_GET_FP_MODE 46
6379 # define PR_FP_MODE_FR   (1 << 0)
6380 # define PR_FP_MODE_FRE  (1 << 1)
6381 #endif
6382 #ifndef PR_SVE_SET_VL
6383 # define PR_SVE_SET_VL  50
6384 # define PR_SVE_GET_VL  51
6385 # define PR_SVE_VL_LEN_MASK  0xffff
6386 # define PR_SVE_VL_INHERIT   (1 << 17)
6387 #endif
6388 #ifndef PR_PAC_RESET_KEYS
6389 # define PR_PAC_RESET_KEYS  54
6390 # define PR_PAC_APIAKEY   (1 << 0)
6391 # define PR_PAC_APIBKEY   (1 << 1)
6392 # define PR_PAC_APDAKEY   (1 << 2)
6393 # define PR_PAC_APDBKEY   (1 << 3)
6394 # define PR_PAC_APGAKEY   (1 << 4)
6395 #endif
6396 #ifndef PR_SET_TAGGED_ADDR_CTRL
6397 # define PR_SET_TAGGED_ADDR_CTRL 55
6398 # define PR_GET_TAGGED_ADDR_CTRL 56
6399 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6400 #endif
6401 #ifndef PR_MTE_TCF_SHIFT
6402 # define PR_MTE_TCF_SHIFT       1
6403 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6404 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6405 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6406 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6407 # define PR_MTE_TAG_SHIFT       3
6408 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6409 #endif
6410 #ifndef PR_SET_IO_FLUSHER
6411 # define PR_SET_IO_FLUSHER 57
6412 # define PR_GET_IO_FLUSHER 58
6413 #endif
6414 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6415 # define PR_SET_SYSCALL_USER_DISPATCH 59
6416 #endif
6417 #ifndef PR_SME_SET_VL
6418 # define PR_SME_SET_VL  63
6419 # define PR_SME_GET_VL  64
6420 # define PR_SME_VL_LEN_MASK  0xffff
6421 # define PR_SME_VL_INHERIT   (1 << 17)
6422 #endif
6423 
6424 #include "target_prctl.h"
6425 
/* Default for per-target prctl hooks that take no argument. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6430 
/* Default for per-target prctl hooks that take one argument. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6435 
6436 #ifndef do_prctl_get_fp_mode
6437 #define do_prctl_get_fp_mode do_prctl_inval0
6438 #endif
6439 #ifndef do_prctl_set_fp_mode
6440 #define do_prctl_set_fp_mode do_prctl_inval1
6441 #endif
6442 #ifndef do_prctl_sve_get_vl
6443 #define do_prctl_sve_get_vl do_prctl_inval0
6444 #endif
6445 #ifndef do_prctl_sve_set_vl
6446 #define do_prctl_sve_set_vl do_prctl_inval1
6447 #endif
6448 #ifndef do_prctl_reset_keys
6449 #define do_prctl_reset_keys do_prctl_inval1
6450 #endif
6451 #ifndef do_prctl_set_tagged_addr_ctrl
6452 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6453 #endif
6454 #ifndef do_prctl_get_tagged_addr_ctrl
6455 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6456 #endif
6457 #ifndef do_prctl_get_unalign
6458 #define do_prctl_get_unalign do_prctl_inval1
6459 #endif
6460 #ifndef do_prctl_set_unalign
6461 #define do_prctl_set_unalign do_prctl_inval1
6462 #endif
6463 #ifndef do_prctl_sme_get_vl
6464 #define do_prctl_sme_get_vl do_prctl_inval0
6465 #endif
6466 #ifndef do_prctl_sme_set_vl
6467 #define do_prctl_sme_set_vl do_prctl_inval1
6468 #endif
6469 
/*
 * Emulate prctl(2).  Pointer and signal-number arguments are translated
 * between guest and host; per-target options are forwarded to the
 * do_prctl_* hooks; options with no pointer arguments are passed
 * through to the host; options that could interfere with the emulator
 * itself are rejected with -TARGET_EINVAL.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            /* Translate the host signal number for the guest. */
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* Task names are at most 16 bytes including the NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6601 
6602 #define NEW_STACK_SIZE 0x40000
6603 
6604 
/* Held by do_fork() while a new guest thread is being set up. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Parameters handed from do_fork() to the newly created thread. */
typedef struct {
    CPUArchState *env;          /* CPU state for the new thread */
    pthread_mutex_t mutex;      /* with 'cond': startup handshake */
    pthread_cond_t cond;
    pthread_t thread;
    uint32_t tid;               /* host TID, filled in by the child */
    abi_ulong child_tidptr;     /* guest addresses to publish the TID at, */
    abi_ulong parent_tidptr;    /* or 0 if not requested */
    sigset_t sigmask;           /* signal mask to restore in the child */
} new_thread_info;
6616 
/*
 * Start routine for guest threads created by do_fork(CLONE_VM):
 * register the thread with RCU/TCG, publish its TID to the guest
 * addresses requested via CLONE_{CHILD,PARENT}_SETTID, signal the
 * parent that startup is done, wait for the parent to finish TLS
 * setup (via clone_lock), then enter the guest CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6650 
6651 /* do_fork() Must return host values and target errnos (unlike most
6652    do_*() functions). */
6653 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6654                    abi_ulong parent_tidptr, target_ulong newtls,
6655                    abi_ulong child_tidptr)
6656 {
6657     CPUState *cpu = env_cpu(env);
6658     int ret;
6659     TaskState *ts;
6660     CPUState *new_cpu;
6661     CPUArchState *new_env;
6662     sigset_t sigmask;
6663 
6664     flags &= ~CLONE_IGNORED_FLAGS;
6665 
6666     /* Emulate vfork() with fork() */
6667     if (flags & CLONE_VFORK)
6668         flags &= ~(CLONE_VFORK | CLONE_VM);
6669 
6670     if (flags & CLONE_VM) {
6671         TaskState *parent_ts = (TaskState *)cpu->opaque;
6672         new_thread_info info;
6673         pthread_attr_t attr;
6674 
6675         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6676             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6677             return -TARGET_EINVAL;
6678         }
6679 
6680         ts = g_new0(TaskState, 1);
6681         init_task_state(ts);
6682 
6683         /* Grab a mutex so that thread setup appears atomic.  */
6684         pthread_mutex_lock(&clone_lock);
6685 
6686         /*
6687          * If this is our first additional thread, we need to ensure we
6688          * generate code for parallel execution and flush old translations.
6689          * Do this now so that the copy gets CF_PARALLEL too.
6690          */
6691         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6692             cpu->tcg_cflags |= CF_PARALLEL;
6693             tb_flush(cpu);
6694         }
6695 
6696         /* we create a new CPU instance. */
6697         new_env = cpu_copy(env);
6698         /* Init regs that differ from the parent.  */
6699         cpu_clone_regs_child(new_env, newsp, flags);
6700         cpu_clone_regs_parent(env, flags);
6701         new_cpu = env_cpu(new_env);
6702         new_cpu->opaque = ts;
6703         ts->bprm = parent_ts->bprm;
6704         ts->info = parent_ts->info;
6705         ts->signal_mask = parent_ts->signal_mask;
6706 
6707         if (flags & CLONE_CHILD_CLEARTID) {
6708             ts->child_tidptr = child_tidptr;
6709         }
6710 
6711         if (flags & CLONE_SETTLS) {
6712             cpu_set_tls (new_env, newtls);
6713         }
6714 
6715         memset(&info, 0, sizeof(info));
6716         pthread_mutex_init(&info.mutex, NULL);
6717         pthread_mutex_lock(&info.mutex);
6718         pthread_cond_init(&info.cond, NULL);
6719         info.env = new_env;
6720         if (flags & CLONE_CHILD_SETTID) {
6721             info.child_tidptr = child_tidptr;
6722         }
6723         if (flags & CLONE_PARENT_SETTID) {
6724             info.parent_tidptr = parent_tidptr;
6725         }
6726 
6727         ret = pthread_attr_init(&attr);
6728         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6729         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6730         /* It is not safe to deliver signals until the child has finished
6731            initializing, so temporarily block all signals.  */
6732         sigfillset(&sigmask);
6733         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6734         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6735 
6736         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6737         /* TODO: Free new CPU state if thread creation failed.  */
6738 
6739         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6740         pthread_attr_destroy(&attr);
6741         if (ret == 0) {
6742             /* Wait for the child to initialize.  */
6743             pthread_cond_wait(&info.cond, &info.mutex);
6744             ret = info.tid;
6745         } else {
6746             ret = -1;
6747         }
6748         pthread_mutex_unlock(&info.mutex);
6749         pthread_cond_destroy(&info.cond);
6750         pthread_mutex_destroy(&info.mutex);
6751         pthread_mutex_unlock(&clone_lock);
6752     } else {
6753         /* if no CLONE_VM, we consider it is a fork */
6754         if (flags & CLONE_INVALID_FORK_FLAGS) {
6755             return -TARGET_EINVAL;
6756         }
6757 
6758         /* We can't support custom termination signals */
6759         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6760             return -TARGET_EINVAL;
6761         }
6762 
6763 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6764         if (flags & CLONE_PIDFD) {
6765             return -TARGET_EINVAL;
6766         }
6767 #endif
6768 
6769         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6770         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6771             return -TARGET_EINVAL;
6772         }
6773 
6774         if (block_signals()) {
6775             return -QEMU_ERESTARTSYS;
6776         }
6777 
6778         fork_start();
6779         ret = fork();
6780         if (ret == 0) {
6781             /* Child Process.  */
6782             cpu_clone_regs_child(env, newsp, flags);
6783             fork_end(1);
6784             /* There is a race condition here.  The parent process could
6785                theoretically read the TID in the child process before the child
6786                tid is set.  This would require using either ptrace
6787                (not implemented) or having *_tidptr to point at a shared memory
6788                mapping.  We can't repeat the spinlock hack used above because
6789                the child process gets its own copy of the lock.  */
6790             if (flags & CLONE_CHILD_SETTID)
6791                 put_user_u32(sys_gettid(), child_tidptr);
6792             if (flags & CLONE_PARENT_SETTID)
6793                 put_user_u32(sys_gettid(), parent_tidptr);
6794             ts = (TaskState *)cpu->opaque;
6795             if (flags & CLONE_SETTLS)
6796                 cpu_set_tls (env, newtls);
6797             if (flags & CLONE_CHILD_CLEARTID)
6798                 ts->child_tidptr = child_tidptr;
6799         } else {
6800             cpu_clone_regs_parent(env, flags);
6801             if (flags & CLONE_PIDFD) {
6802                 int pid_fd = 0;
6803 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6804                 int pid_child = ret;
6805                 pid_fd = pidfd_open(pid_child, 0);
6806                 if (pid_fd >= 0) {
6807                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6808                                                | FD_CLOEXEC);
6809                 } else {
6810                         pid_fd = 0;
6811                 }
6812 #endif
6813                 put_user_u32(pid_fd, parent_tidptr);
6814                 }
6815             fork_end(0);
6816         }
6817         g_assert(!cpu_in_exclusive_context(cpu));
6818     }
6819     return ret;
6820 }
6821 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command number to the host's value.
 * Commands QEMU does not know how to translate yield -TARGET_EINVAL.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6928 
/* Expands to case labels converting the flock 'type' field; the
 * direction of the conversion is chosen by how the caller defines
 * TRANSTBL_CONVERT before instantiating this macro. */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6935 
/* Convert a guest flock l_type value to the host constant;
 * returns -TARGET_EINVAL for unknown lock types. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6943 
/* Convert a host flock l_type value to the guest constant. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6954 
6955 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6956                                             abi_ulong target_flock_addr)
6957 {
6958     struct target_flock *target_fl;
6959     int l_type;
6960 
6961     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6962         return -TARGET_EFAULT;
6963     }
6964 
6965     __get_user(l_type, &target_fl->l_type);
6966     l_type = target_to_host_flock(l_type);
6967     if (l_type < 0) {
6968         return l_type;
6969     }
6970     fl->l_type = l_type;
6971     __get_user(fl->l_whence, &target_fl->l_whence);
6972     __get_user(fl->l_start, &target_fl->l_start);
6973     __get_user(fl->l_len, &target_fl->l_len);
6974     __get_user(fl->l_pid, &target_fl->l_pid);
6975     unlock_user_struct(target_fl, target_flock_addr, 0);
6976     return 0;
6977 }
6978 
/*
 * Copy the host struct flock64 @fl out to a guest struct flock at
 * @target_flock_addr, translating l_type to the guest constant.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
                                          const struct flock64 *fl)
{
    struct target_flock *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
6998 
/* Function types for the flock64 copy-in/copy-out helpers, so callers
 * can be parameterised on the guest struct layout (e.g. Arm OABI vs EABI). */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
7001 
7002 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* Arm old-ABI (OABI) layout of struct flock64: packed, so the 64-bit
 * fields are not naturally aligned, unlike the EABI layout. */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
7010 
7011 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
7012                                                    abi_ulong target_flock_addr)
7013 {
7014     struct target_oabi_flock64 *target_fl;
7015     int l_type;
7016 
7017     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7018         return -TARGET_EFAULT;
7019     }
7020 
7021     __get_user(l_type, &target_fl->l_type);
7022     l_type = target_to_host_flock(l_type);
7023     if (l_type < 0) {
7024         return l_type;
7025     }
7026     fl->l_type = l_type;
7027     __get_user(fl->l_whence, &target_fl->l_whence);
7028     __get_user(fl->l_start, &target_fl->l_start);
7029     __get_user(fl->l_len, &target_fl->l_len);
7030     __get_user(fl->l_pid, &target_fl->l_pid);
7031     unlock_user_struct(target_fl, target_flock_addr, 0);
7032     return 0;
7033 }
7034 
/*
 * Copy the host struct flock64 @fl out to a guest OABI struct flock64
 * at @target_flock_addr, translating l_type to the guest constant.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
                                                 const struct flock64 *fl)
{
    struct target_oabi_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
7054 #endif
7055 
7056 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7057                                               abi_ulong target_flock_addr)
7058 {
7059     struct target_flock64 *target_fl;
7060     int l_type;
7061 
7062     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7063         return -TARGET_EFAULT;
7064     }
7065 
7066     __get_user(l_type, &target_fl->l_type);
7067     l_type = target_to_host_flock(l_type);
7068     if (l_type < 0) {
7069         return l_type;
7070     }
7071     fl->l_type = l_type;
7072     __get_user(fl->l_whence, &target_fl->l_whence);
7073     __get_user(fl->l_start, &target_fl->l_start);
7074     __get_user(fl->l_len, &target_fl->l_len);
7075     __get_user(fl->l_pid, &target_fl->l_pid);
7076     unlock_user_struct(target_fl, target_flock_addr, 0);
7077     return 0;
7078 }
7079 
/*
 * Copy the host struct flock64 @fl out to a guest struct flock64 at
 * @target_flock_addr, translating l_type to the guest constant.
 * Returns 0 on success or -TARGET_EFAULT if guest memory is unwritable.
 */
static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
                                            const struct flock64 *fl)
{
    struct target_flock64 *target_fl;
    short l_type;

    if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
        return -TARGET_EFAULT;
    }

    l_type = host_to_target_flock(fl->l_type);
    __put_user(l_type, &target_fl->l_type);
    __put_user(fl->l_whence, &target_fl->l_whence);
    __put_user(fl->l_start, &target_fl->l_start);
    __put_user(fl->l_len, &target_fl->l_len);
    __put_user(fl->l_pid, &target_fl->l_pid);
    unlock_user_struct(target_fl, target_flock_addr, 1);
    return 0;
}
7099 
/*
 * Emulate fcntl(2) for the guest.  The guest command number @cmd is
 * first mapped to the host number; struct arguments (flock/flock64,
 * f_owner_ex), flag bitmasks and signal numbers are converted between
 * guest and host representations around the host safe_fcntl() call.
 * Returns the host result or a -TARGET_E* error code.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* GETLK writes the conflicting lock back to the guest. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* The signal number itself needs guest->host translation. */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument: no conversion needed. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown commands are passed through with the guest number. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7222 
7223 #ifdef USE_UID16
7224 
/* Clamp a full-width uid to the 16-bit ABI: anything above 65535
 * becomes the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
7232 
/* Clamp a full-width gid to the 16-bit ABI: anything above 65535
 * becomes the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
7240 
/* Widen a 16-bit uid: the 16-bit "no uid" sentinel 0xffff maps to -1,
 * everything else passes through unchanged. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
7248 
/* Widen a 16-bit gid: the 16-bit "no gid" sentinel 0xffff maps to -1,
 * everything else passes through unchanged. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a uid/gid to guest byte order (16-bit field with USE_UID16). */
static inline int tswapid(int id)
{
    return tswap16(id);
}
7260 
/* Store a uid/gid into guest memory (16-bit field with USE_UID16). */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7262 
7263 #else /* !USE_UID16 */
/* Without USE_UID16 guest uids are full-width: identity conversion. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* Without USE_UID16 guest gids are full-width: identity conversion. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* Without USE_UID16 there is no 16-bit sentinel to widen: identity. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* Without USE_UID16 there is no 16-bit sentinel to widen: identity. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a uid/gid to guest byte order (full 32-bit field). */
static inline int tswapid(int id)
{
    return tswap32(id);
}
7284 
/* Store a uid/gid into guest memory (full 32-bit field). */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7286 
7287 #endif /* USE_UID16 */
7288 
7289 /* We must do direct syscalls for setting UID/GID, because we want to
7290  * implement the Linux system call semantics of "change only for this thread",
7291  * not the libc/POSIX semantics of "change for all threads in process".
7292  * (See http://ewontfix.com/17/ for more details.)
7293  * We use the 32-bit version of the syscalls if present; if it is not
7294  * then either the host architecture supports 32-bit UIDs natively with
7295  * the standard syscall, or the 16-bit UID is the best we can do.
7296  */
7297 #ifdef __NR_setuid32
7298 #define __NR_sys_setuid __NR_setuid32
7299 #else
7300 #define __NR_sys_setuid __NR_setuid
7301 #endif
7302 #ifdef __NR_setgid32
7303 #define __NR_sys_setgid __NR_setgid32
7304 #else
7305 #define __NR_sys_setgid __NR_setgid
7306 #endif
7307 #ifdef __NR_setresuid32
7308 #define __NR_sys_setresuid __NR_setresuid32
7309 #else
7310 #define __NR_sys_setresuid __NR_setresuid
7311 #endif
7312 #ifdef __NR_setresgid32
7313 #define __NR_sys_setresgid __NR_setresgid32
7314 #else
7315 #define __NR_sys_setresgid __NR_setresgid
7316 #endif
7317 
/* Raw per-thread uid/gid setters (see the comment above): issued as
 * direct syscalls so only the calling thread is affected, bypassing
 * the libc wrappers that propagate the change to all threads. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7322 
/*
 * One-time initialisation of the syscall layer: registers all thunk
 * struct descriptions and fixes up the ioctl translation table.
 * Entries whose target_cmd size field is all-ones get the size patched
 * in from the thunk type description at runtime.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                /* Size patching only makes sense for pointer arguments. */
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7367 
7368 #ifdef TARGET_NR_truncate64
/*
 * truncate64: reassemble the 64-bit length from the two guest register
 * halves (arg2/arg3), skipping a register when the guest ABI requires
 * 64-bit pairs to be aligned, and call the host truncate64().
 */
static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
                                         abi_long arg2,
                                         abi_long arg3,
                                         abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
}
7380 #endif
7381 
7382 #ifdef TARGET_NR_ftruncate64
/*
 * ftruncate64: reassemble the 64-bit length from the two guest register
 * halves (arg2/arg3), skipping a register when the guest ABI requires
 * 64-bit pairs to be aligned, and call the host ftruncate64().
 */
static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
                                          abi_long arg2,
                                          abi_long arg3,
                                          abi_long arg4)
{
    if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
        arg2 = arg3;
        arg3 = arg4;
    }
    return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
}
7394 #endif
7395 
7396 #if defined(TARGET_NR_timer_settime) || \
7397     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Copy a guest struct itimerspec into host form: both embedded
 * timespecs are converted; returns -TARGET_EFAULT on bad guest memory. */
static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
                                                 abi_ulong target_addr)
{
    if (target_to_host_timespec(&host_its->it_interval, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_interval)) ||
        target_to_host_timespec(&host_its->it_value, target_addr +
                                offsetof(struct target_itimerspec,
                                         it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7412 #endif
7413 
7414 #if defined(TARGET_NR_timer_settime64) || \
7415     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
/* Copy a guest struct __kernel_itimerspec (64-bit time) into host form;
 * returns -TARGET_EFAULT on bad guest memory. */
static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
                                                   abi_ulong target_addr)
{
    if (target_to_host_timespec64(&host_its->it_interval, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval)) ||
        target_to_host_timespec64(&host_its->it_value, target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value))) {
        return -TARGET_EFAULT;
    }

    return 0;
}
7430 #endif
7431 
7432 #if ((defined(TARGET_NR_timerfd_gettime) || \
7433       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7434       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
/* Copy a host struct itimerspec out to a guest struct itimerspec;
 * returns -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
                                                 struct itimerspec *host_its)
{
    if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_interval),
                                &host_its->it_interval) ||
        host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
                                                       it_value),
                                &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7448 #endif
7449 
7450 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7451       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7452       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
/* Copy a host struct itimerspec out to a guest struct __kernel_itimerspec
 * (64-bit time); returns -TARGET_EFAULT on bad guest memory. */
static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
                                                   struct itimerspec *host_its)
{
    if (host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_interval),
                                  &host_its->it_interval) ||
        host_to_target_timespec64(target_addr +
                                  offsetof(struct target__kernel_itimerspec,
                                           it_value),
                                  &host_its->it_value)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
7468 #endif
7469 
7470 #if defined(TARGET_NR_adjtimex) || \
7471     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct timex at @target_addr into the host struct timex
 * @host_tx, field by field (including the embedded timeval).
 * Returns 0 on success or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7506 
/*
 * Copy the host struct timex @host_tx out to a guest struct timex at
 * @target_addr, field by field (including the embedded timeval).
 * Returns 0 on success or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7541 #endif
7542 
7543 
7544 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest struct __kernel_timex (64-bit time) at @target_addr into
 * the host struct timex @host_tx.  The embedded time value is converted
 * separately via copy_from_user_timeval64(); the remaining fields are
 * copied one by one.  Returns 0 or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7583 
/*
 * Copy the host struct timex @host_tx out to a guest struct
 * __kernel_timex (64-bit time) at @target_addr.  The embedded time
 * value is converted separately via copy_to_user_timeval64(); the
 * remaining fields are copied one by one.
 * Returns 0 or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7622 #endif
7623 
7624 #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
7625 #define sigev_notify_thread_id _sigev_un._tid
7626 #endif
7627 
7628 static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
7629                                                abi_ulong target_addr)
7630 {
7631     struct target_sigevent *target_sevp;
7632 
7633     if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
7634         return -TARGET_EFAULT;
7635     }
7636 
7637     /* This union is awkward on 64 bit systems because it has a 32 bit
7638      * integer and a pointer in it; we follow the conversion approach
7639      * used for handling sigval types in signal.c so the guest should get
7640      * the correct value back even if we did a 64 bit byteswap and it's
7641      * using the 32 bit integer.
7642      */
7643     host_sevp->sigev_value.sival_ptr =
7644         (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
7645     host_sevp->sigev_signo =
7646         target_to_host_signal(tswap32(target_sevp->sigev_signo));
7647     host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
7648     host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
7649 
7650     unlock_user_struct(target_sevp, target_addr, 1);
7651     return 0;
7652 }
7653 
7654 #if defined(TARGET_NR_mlockall)
7655 static inline int target_to_host_mlockall_arg(int arg)
7656 {
7657     int result = 0;
7658 
7659     if (arg & TARGET_MCL_CURRENT) {
7660         result |= MCL_CURRENT;
7661     }
7662     if (arg & TARGET_MCL_FUTURE) {
7663         result |= MCL_FUTURE;
7664     }
7665 #ifdef MCL_ONFAULT
7666     if (arg & TARGET_MCL_ONFAULT) {
7667         result |= MCL_ONFAULT;
7668     }
7669 #endif
7670 
7671     return result;
7672 }
7673 #endif
7674 
7675 static inline int target_to_host_msync_arg(abi_long arg)
7676 {
7677     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7678            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7679            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7680            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7681 }
7682 
7683 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7684      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7685      defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's 64-bit stat structure.
 * On 32-bit Arm the EABI and OABI layouts differ, so the EABI path is
 * selected at runtime via cpu_env->eabi; all other targets use a single
 * target_stat64 (or target_stat) layout.  The guest struct is zeroed
 * first so padding and unpopulated fields read as 0.
 * Returns 0 on success or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Layouts with a truncated st_ino also get the full value here. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7758 #endif
7759 
7760 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy an already-filled host-side struct target_statx out to guest
 * memory at target_addr, converting every field to guest byte order
 * with __put_user.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for writing.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero the whole struct so spare/unlisted fields read back as 0. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7799 #endif
7800 
/*
 * Issue a raw futex syscall for QEMU-internal use (no errno mangling).
 *
 * On 64-bit hosts __NR_futex already takes a 64-bit timespec, so it is
 * used unconditionally.  On 32-bit hosts, prefer __NR_futex_time64 when
 * the libc timespec carries a 64-bit tv_sec; otherwise fall back to the
 * legacy 32-bit __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only if the host defines no usable futex syscall at all. */
    g_assert_not_reached();
}
7825 
/*
 * Like do_sys_futex(), but for guest-visible waits: uses the safe_
 * wrappers (so a guest signal arriving around the blocking syscall is
 * not lost) and converts the host error into a -TARGET_* errno.
 *
 * Returns -TARGET_ENOSYS if the host has no suitable futex syscall.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7850 
7851 /* ??? Using host futex calls even when target atomic operations
7852    are not really atomic probably breaks things.  However implementing
7853    futexes locally would make futexes shared between multiple processes
7854    tricky.  However they're probably useless because guest atomic
7855    operations won't work either.  */
7856 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Common implementation for the guest futex and futex_time64 syscalls.
 *
 * time64 selects how the guest timespec at 'timeout' is decoded.  The
 * FUTEX_* command constants are assumed identical on host and target;
 * only VAL/VAL3 need byte-swapping when the kernel compares them
 * against the raw guest word at uaddr.
 *
 * Returns the host futex result via do_safe_futex(), -TARGET_ENOSYS
 * for unknown commands, or -TARGET_EFAULT for a bad timespec pointer.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* The kernel compares VAL against the guest-order word at uaddr. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        /* These commands take no timeout; ignore whatever the guest passed. */
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
7922 #endif
7923 
7924 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement the guest name_to_handle_at(2) syscall: read handle_bytes
 * from the guest file_handle, run the host syscall into a temporary
 * host buffer, then copy the (opaque) handle and the mount id back to
 * the guest with byte swapping of the two defined header fields.
 *
 * NOTE(review): 'size' (handle_bytes) is guest-controlled, so
 * total_size = sizeof(struct file_handle) + size could wrap for huge
 * values; lock_user should then fail with EFAULT -- confirm benign.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7976 #endif
7977 
7978 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement the guest open_by_handle_at(2) syscall: duplicate the
 * guest file_handle into host memory, fixing the byte order of the
 * handle_bytes/handle_type header fields (the payload is opaque),
 * then run the host syscall with translated open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
8010 #endif
8011 
8012 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
8013 
8014 static abi_long do_signalfd4(int fd, abi_long mask, int flags)
8015 {
8016     int host_flags;
8017     target_sigset_t *target_mask;
8018     sigset_t host_mask;
8019     abi_long ret;
8020 
8021     if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
8022         return -TARGET_EINVAL;
8023     }
8024     if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
8025         return -TARGET_EFAULT;
8026     }
8027 
8028     target_to_host_sigset(&host_mask, target_mask);
8029 
8030     host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
8031 
8032     ret = get_errno(signalfd(fd, &host_mask, host_flags));
8033     if (ret >= 0) {
8034         fd_trans_register(ret, &target_signalfd_trans);
8035     }
8036 
8037     unlock_user_struct(target_mask, mask, 0);
8038 
8039     return ret;
8040 }
8041 #endif
8042 
/*
 * Map host to target signal numbers for the wait family of syscalls.
 * All status bits other than the signal number itself are assumed to
 * share the same layout on host and target.
 */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        int sig = host_to_target_signal(WTERMSIG(status));

        return sig | (status & ~0x7f);
    }
    if (WIFSTOPPED(status)) {
        int sig = host_to_target_signal(WSTOPSIG(status));

        return (sig << 8) | (status & 0xff);
    }
    return status;
}
8056 
/*
 * Emulate /proc/self/cmdline: write the guest argv strings, each with
 * its trailing NUL, into fd.  Returns 0 on success, -1 on a failed or
 * short write.
 */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so the NUL separator is written too, as the kernel does. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
8073 
8074 static void show_smaps(int fd, unsigned long size)
8075 {
8076     unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8077     unsigned long size_kb = size >> 10;
8078 
8079     dprintf(fd, "Size:                  %lu kB\n"
8080                 "KernelPageSize:        %lu kB\n"
8081                 "MMUPageSize:           %lu kB\n"
8082                 "Rss:                   0 kB\n"
8083                 "Pss:                   0 kB\n"
8084                 "Pss_Dirty:             0 kB\n"
8085                 "Shared_Clean:          0 kB\n"
8086                 "Shared_Dirty:          0 kB\n"
8087                 "Private_Clean:         0 kB\n"
8088                 "Private_Dirty:         0 kB\n"
8089                 "Referenced:            0 kB\n"
8090                 "Anonymous:             0 kB\n"
8091                 "LazyFree:              0 kB\n"
8092                 "AnonHugePages:         0 kB\n"
8093                 "ShmemPmdMapped:        0 kB\n"
8094                 "FilePmdMapped:         0 kB\n"
8095                 "Shared_Hugetlb:        0 kB\n"
8096                 "Private_Hugetlb:       0 kB\n"
8097                 "Swap:                  0 kB\n"
8098                 "SwapPss:               0 kB\n"
8099                 "Locked:                0 kB\n"
8100                 "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
8101 }
8102 
/*
 * Emulate /proc/self/maps (and, when smaps is true, /proc/self/smaps)
 * for the guest: walk the host's own mappings and emit only the ranges
 * that correspond to valid guest addresses, translated through h2g().
 * Always returns 0.
 */
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address mapping guest space. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (!page_check_range(h2g(min), max - min, flags)) {
                continue;
            }

            /* On HPPA the stack grows upward, so compare the other end. */
#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            /* Pad the path to column 73, matching the kernel's layout. */
            if (path) {
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
8184 
/* Emulate /proc/self/maps (no smaps detail). */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8189 
/* Emulate /proc/self/smaps (maps plus per-mapping detail). */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8194 
/*
 * Emulate /proc/self/stat: emit the 44 space-separated fields, filling
 * in only pid, comm, state, ppid, starttime and startstack with real
 * values; every other field reads as "0".  Returns 0 on success, -1 on
 * a failed or short write.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name: basename of argv[0], truncated to 15 chars like comm */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 2) {
            /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
8235 
/*
 * Emulate /proc/self/auxv: copy the saved guest auxiliary vector (which
 * lives on the guest stack) into fd, then rewind the fd so the caller
 * reads from the start.  Always returns 0, even on a short write.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr/len were advanced by the copy loop, so this
         * unlocks with the advanced pointer and the remaining length
         * (both past-the-end/0 after a full copy) -- confirm intended.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8265 
/*
 * Return 1 if FILENAME refers to the current process's own /proc entry
 * named ENTRY, i.e. "/proc/self/<entry>" or "/proc/<own-pid>/<entry>";
 * return 0 otherwise.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *rest = filename;

    if (strncmp(rest, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    rest += strlen("/proc/");

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* A numeric directory: it must match our own pid exactly. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(rest, entry) == 0;
}
8289 
/*
 * Write a crash report (formatted message, executable path, CPU state
 * and the emulated memory map) to logfile.  A NULL logfile is silently
 * ignored, so callers may pass the result of qemu_log_trylock() directly.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                      const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
8302 
/*
 * Dump diagnostic state for a fatal guest exception to stderr and, when
 * logging goes to a separate file, to the QEMU log as well.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        /* May return NULL; excp_dump_file() tolerates that. */
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8316 
8317 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8318     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8319     defined(TARGET_RISCV) || defined(TARGET_S390X)
/* Return 1 when FILENAME is exactly the absolute /proc path ENTRY. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8324 #endif
8325 
8326 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8327 static int open_net_route(CPUArchState *cpu_env, int fd)
8328 {
8329     FILE *fp;
8330     char *line = NULL;
8331     size_t len = 0;
8332     ssize_t read;
8333 
8334     fp = fopen("/proc/net/route", "r");
8335     if (fp == NULL) {
8336         return -1;
8337     }
8338 
8339     /* read header */
8340 
8341     read = getline(&line, &len, fp);
8342     dprintf(fd, "%s", line);
8343 
8344     /* read routes */
8345 
8346     while ((read = getline(&line, &len, fp)) != -1) {
8347         char iface[16];
8348         uint32_t dest, gw, mask;
8349         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8350         int fields;
8351 
8352         fields = sscanf(line,
8353                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8354                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8355                         &mask, &mtu, &window, &irtt);
8356         if (fields != 11) {
8357             continue;
8358         }
8359         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8360                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8361                 metric, tswap32(mask), mtu, window, irtt);
8362     }
8363 
8364     free(line);
8365     fclose(fp);
8366 
8367     return 0;
8368 }
8369 #endif
8370 
8371 #if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC guests: report a sun4u machine. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8377 #endif
8378 
8379 #if defined(TARGET_HPPA)
/*
 * Emulate /proc/cpuinfo for HPPA guests: one fixed PA7300LC (B160L)
 * entry per online host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i, num_cpus;

    num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
        dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
        dprintf(fd, "capabilities\t: os32\n");
        dprintf(fd, "model\t\t: 9000/778/B160L - "
                    "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
    }
    return 0;
}
8395 #endif
8396 
8397 #if defined(TARGET_RISCV)
/*
 * Emulate /proc/cpuinfo for RISC-V guests: one entry per online host
 * CPU, reporting the emulated CPU's ISA string and MMU scheme.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    if (cfg->mmu) {
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    /* riscv_isa_string() allocates; we own the string. */
    g_free(isa_string);
    return 0;
}
8424 #endif
8425 
8426 #if defined(TARGET_S390X)
8427 /*
8428  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8429  * show in /proc/cpuinfo.
8430  *
8431  * Skip the following in order to match the missing support in op_ecag():
8432  * - show_cacheinfo().
8433  * - show_cpu_topology().
8434  * - show_cpu_mhz().
8435  *
8436  * Use fixed values for certain fields:
8437  * - bogomips per cpu - from a qemu-system-s390x run.
8438  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8439  *
8440  * Keep the code structure close to arch/s390/kernel/processor.c.
8441  */
8442 
/*
 * Print the s390x "facilities" line: the number of every bit set in
 * the emulated CPU's STFL(E) facility block.
 */
static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}
8458 
/*
 * Build the s390x CPU identification word for CPU number n: n is
 * deposited into the low CPU_PHYS_ADDR_BITS of the CPU_ID_BITS field.
 */
static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}
8464 
8465 static void show_cpu_summary(CPUArchState *cpu_env, int fd)
8466 {
8467     S390CPUModel *model = env_archcpu(cpu_env)->model;
8468     int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8469     uint32_t elf_hwcap = get_elf_hwcap();
8470     const char *hwcap_str;
8471     int i;
8472 
8473     dprintf(fd, "vendor_id       : IBM/S390\n"
8474                 "# processors    : %i\n"
8475                 "bogomips per cpu: 13370.00\n",
8476             num_cpus);
8477     dprintf(fd, "max thread id   : 0\n");
8478     dprintf(fd, "features\t: ");
8479     for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
8480         if (!(elf_hwcap & (1 << i))) {
8481             continue;
8482         }
8483         hwcap_str = elf_hwcap_str(i);
8484         if (hwcap_str) {
8485             dprintf(fd, "%s ", hwcap_str);
8486         }
8487     }
8488     dprintf(fd, "\n");
8489     show_facilities(fd);
8490     for (i = 0; i < num_cpus; i++) {
8491         dprintf(fd, "processor %d: "
8492                "version = %02X,  "
8493                "identification = %06X,  "
8494                "machine = %04X\n",
8495                i, model->cpu_ver, cpu_ident(i), model->def->type);
8496     }
8497 }
8498 
/* Print the per-CPU version/identification/machine lines for CPU n. */
static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}
8507 
/* Print the per-CPU /proc/cpuinfo stanza for CPU number n. */
static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}
8513 
/*
 * Emulate /proc/cpuinfo for s390x guests: summary header followed by
 * one stanza per online host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
8525 #endif
8526 
8527 #if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k guests: report a qemu machine model. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8533 #endif
8534 
/*
 * Open a path on behalf of the guest, intercepting the /proc entries
 * that must be faked: /proc/<self>/exe redirects to the emulated
 * executable, and the entries listed in fakes[] are synthesized into a
 * memfd (or an unlinked temp file when memfd_create is unavailable).
 *
 * 'safe' selects the signal-safe openat wrapper.  Returns a host fd on
 * success or -1 with host errno set, matching openat(2).
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    struct fake_open {
        const char *filename;                          /* name to intercept */
        int (*fill)(CPUArchState *cpu_env, int fd);    /* content generator */
        int (*cmp)(const char *s1, const char *s2);    /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/<self>/exe must name the emulated binary, not QEMU itself. */
    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            /* Unlink immediately: the fd keeps the file alive. */
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated content from the top. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
8616 
8617 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8618 {
8619     ssize_t ret;
8620 
8621     if (!pathname || !buf) {
8622         errno = EFAULT;
8623         return -1;
8624     }
8625 
8626     if (!bufsiz) {
8627         /* Short circuit this for the magic exe check. */
8628         errno = EINVAL;
8629         return -1;
8630     }
8631 
8632     if (is_proc_myself((const char *)pathname, "exe")) {
8633         /*
8634          * Don't worry about sign mismatch as earlier mapping
8635          * logic would have thrown a bad address error.
8636          */
8637         ret = MIN(strlen(exec_path), bufsiz);
8638         /* We cannot NUL terminate the string. */
8639         memcpy(buf, exec_path, ret);
8640     } else {
8641         ret = readlink(path(pathname), buf, bufsiz);
8642     }
8643 
8644     return ret;
8645 }
8646 
/*
 * Shared implementation of the execve() and execveat() syscalls.
 *
 * cpu_env:     guest CPU state (not referenced in this body).
 * dirfd:       directory fd for execveat(); callers pass AT_FDCWD for execve.
 * pathname:    guest address of the NUL-terminated executable path.
 * guest_argp:  guest address of the NULL-terminated argv pointer vector.
 * guest_envp:  guest address of the NULL-terminated envp pointer vector.
 * flags:       execveat() flags, passed straight through to the host.
 * is_execveat: selects which host syscall variant is invoked.
 *
 * On success the host exec replaces this process, so the call does not
 * return; on failure a negative target errno is returned.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* Count argv entries; a fault while walking the guest array is fatal. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Likewise count the environment entries. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    /*
     * Host-side vectors, zero-filled so the cleanup loops below can
     * stop at the first NULL entry even after a partial failure.
     */
    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Lock each guest argv string into host memory. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /* And each guest envp string. */
    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* "/proc/self/exe" must resolve to the emulated binary, not QEMU. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /*
     * Unlock every guest string that was successfully locked; the
     * vectors were zero-initialized, so these loops stop at the first
     * entry that was never filled in.
     */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8762 
8763 #define TIMER_MAGIC 0x0caf0000
8764 #define TIMER_MAGIC_MASK 0xffff0000
8765 
8766 /* Convert QEMU provided timer ID back to internal 16bit index format */
8767 static target_timer_t get_timer_id(abi_long arg)
8768 {
8769     target_timer_t timerid = arg;
8770 
8771     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8772         return -TARGET_EINVAL;
8773     }
8774 
8775     timerid &= 0xffff;
8776 
8777     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8778         return -TARGET_EINVAL;
8779     }
8780 
8781     return timerid;
8782 }
8783 
8784 static int target_to_host_cpu_mask(unsigned long *host_mask,
8785                                    size_t host_size,
8786                                    abi_ulong target_addr,
8787                                    size_t target_size)
8788 {
8789     unsigned target_bits = sizeof(abi_ulong) * 8;
8790     unsigned host_bits = sizeof(*host_mask) * 8;
8791     abi_ulong *target_mask;
8792     unsigned i, j;
8793 
8794     assert(host_size >= target_size);
8795 
8796     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8797     if (!target_mask) {
8798         return -TARGET_EFAULT;
8799     }
8800     memset(host_mask, 0, host_size);
8801 
8802     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8803         unsigned bit = i * target_bits;
8804         abi_ulong val;
8805 
8806         __get_user(val, &target_mask[i]);
8807         for (j = 0; j < target_bits; j++, bit++) {
8808             if (val & (1UL << j)) {
8809                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8810             }
8811         }
8812     }
8813 
8814     unlock_user(target_mask, target_addr, 0);
8815     return 0;
8816 }
8817 
8818 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8819                                    size_t host_size,
8820                                    abi_ulong target_addr,
8821                                    size_t target_size)
8822 {
8823     unsigned target_bits = sizeof(abi_ulong) * 8;
8824     unsigned host_bits = sizeof(*host_mask) * 8;
8825     abi_ulong *target_mask;
8826     unsigned i, j;
8827 
8828     assert(host_size >= target_size);
8829 
8830     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8831     if (!target_mask) {
8832         return -TARGET_EFAULT;
8833     }
8834 
8835     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8836         unsigned bit = i * target_bits;
8837         abi_ulong val = 0;
8838 
8839         for (j = 0; j < target_bits; j++, bit++) {
8840             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8841                 val |= 1UL << j;
8842             }
8843         }
8844         __put_user(val, &target_mask[i]);
8845     }
8846 
8847     unlock_user(target_mask, target_addr, target_size);
8848     return 0;
8849 }
8850 
8851 #ifdef TARGET_NR_getdents
/*
 * Implement the getdents() syscall: read directory entries from the
 * host into a scratch buffer, then repack each record into the guest's
 * struct target_dirent layout in the guest buffer at arg2 (count bytes).
 *
 * Returns the number of bytes stored in the guest buffer, 0 at
 * end-of-directory, or a negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;  /* host-side scratch buffer */
    void *tdirp;
    int hlen, hoff, toff;           /* host length; host/target offsets */
    int hreclen, treclen;           /* host/target record lengths */
    off64_t prev_diroff = 0;        /* d_off of the last converted entry */

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    /* Older hosts may lack getdents(); fall back to getdents64(). */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records and emit one target record for each. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: one byte for the NUL terminator, one for the type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        /* namelen + 1 copies the trailing NUL as well. */
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8936 #endif /* TARGET_NR_getdents */
8937 
8938 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement the getdents64() syscall: read host dirent64 records and
 * repack them as struct target_dirent64 into the guest buffer at arg2
 * (count bytes long).
 *
 * Returns the number of bytes stored in the guest buffer, 0 at
 * end-of-directory, or a negative target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;  /* host-side scratch buffer */
    void *tdirp;
    int hlen, hoff, toff;           /* host length; host/target offsets */
    int hreclen, treclen;           /* host/target record lengths */
    off64_t prev_diroff = 0;        /* d_off of the last converted entry */

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records and emit one target record for each. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* Unlike do_getdents(), namelen here includes the NUL byte. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
9002 #endif /* TARGET_NR_getdents64 */
9003 
9004 #if defined(TARGET_NR_riscv_hwprobe)
9005 
9006 #define RISCV_HWPROBE_KEY_MVENDORID     0
9007 #define RISCV_HWPROBE_KEY_MARCHID       1
9008 #define RISCV_HWPROBE_KEY_MIMPID        2
9009 
9010 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9011 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9012 
9013 #define RISCV_HWPROBE_KEY_IMA_EXT_0     4
9014 #define     RISCV_HWPROBE_IMA_FD       (1 << 0)
9015 #define     RISCV_HWPROBE_IMA_C        (1 << 1)
9016 
9017 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9018 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9019 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9020 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9021 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9022 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9023 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9024 
9025 struct riscv_hwprobe {
9026     abi_llong  key;
9027     abi_ullong value;
9028 };
9029 
9030 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9031                                     struct riscv_hwprobe *pair,
9032                                     size_t pair_count)
9033 {
9034     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9035 
9036     for (; pair_count > 0; pair_count--, pair++) {
9037         abi_llong key;
9038         abi_ullong value;
9039         __put_user(0, &pair->value);
9040         __get_user(key, &pair->key);
9041         switch (key) {
9042         case RISCV_HWPROBE_KEY_MVENDORID:
9043             __put_user(cfg->mvendorid, &pair->value);
9044             break;
9045         case RISCV_HWPROBE_KEY_MARCHID:
9046             __put_user(cfg->marchid, &pair->value);
9047             break;
9048         case RISCV_HWPROBE_KEY_MIMPID:
9049             __put_user(cfg->mimpid, &pair->value);
9050             break;
9051         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9052             value = riscv_has_ext(env, RVI) &&
9053                     riscv_has_ext(env, RVM) &&
9054                     riscv_has_ext(env, RVA) ?
9055                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9056             __put_user(value, &pair->value);
9057             break;
9058         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9059             value = riscv_has_ext(env, RVF) &&
9060                     riscv_has_ext(env, RVD) ?
9061                     RISCV_HWPROBE_IMA_FD : 0;
9062             value |= riscv_has_ext(env, RVC) ?
9063                      RISCV_HWPROBE_IMA_C : pair->value;
9064             __put_user(value, &pair->value);
9065             break;
9066         case RISCV_HWPROBE_KEY_CPUPERF_0:
9067             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9068             break;
9069         default:
9070             __put_user(-1, &pair->key);
9071             break;
9072         }
9073     }
9074 }
9075 
9076 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9077 {
9078     int ret, i, tmp;
9079     size_t host_mask_size, target_mask_size;
9080     unsigned long *host_mask;
9081 
9082     /*
9083      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9084      * arg3 contains the cpu count.
9085      */
9086     tmp = (8 * sizeof(abi_ulong));
9087     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9088     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9089                      ~(sizeof(*host_mask) - 1);
9090 
9091     host_mask = alloca(host_mask_size);
9092 
9093     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9094                                   arg4, target_mask_size);
9095     if (ret != 0) {
9096         return ret;
9097     }
9098 
9099     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9100         if (host_mask[i] != 0) {
9101             return 0;
9102         }
9103     }
9104     return -TARGET_EINVAL;
9105 }
9106 
9107 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9108                                  abi_long arg2, abi_long arg3,
9109                                  abi_long arg4, abi_long arg5)
9110 {
9111     int ret;
9112     struct riscv_hwprobe *host_pairs;
9113 
9114     /* flags must be 0 */
9115     if (arg5 != 0) {
9116         return -TARGET_EINVAL;
9117     }
9118 
9119     /* check cpu_set */
9120     if (arg3 != 0) {
9121         ret = cpu_set_valid(arg3, arg4);
9122         if (ret != 0) {
9123             return ret;
9124         }
9125     } else if (arg4 != 0) {
9126         return -TARGET_EINVAL;
9127     }
9128 
9129     /* no pairs */
9130     if (arg2 == 0) {
9131         return 0;
9132     }
9133 
9134     host_pairs = lock_user(VERIFY_WRITE, arg1,
9135                            sizeof(*host_pairs) * (size_t)arg2, 0);
9136     if (host_pairs == NULL) {
9137         return -TARGET_EFAULT;
9138     }
9139     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9140     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9141     return 0;
9142 }
9143 #endif /* TARGET_NR_riscv_hwprobe */
9144 
9145 #if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
9146 _syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
9147 #endif
9148 
9149 #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
9150 #define __NR_sys_open_tree __NR_open_tree
9151 _syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
9152           unsigned int, __flags)
9153 #endif
9154 
9155 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9156 #define __NR_sys_move_mount __NR_move_mount
9157 _syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
9158            int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
9159 #endif
9160 
9161 /* This is an internal helper for do_syscall so that it is easier
9162  * to have a single return point, so that actions, such as logging
9163  * of syscall results, can be performed.
9164  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9165  */
9166 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9167                             abi_long arg2, abi_long arg3, abi_long arg4,
9168                             abi_long arg5, abi_long arg6, abi_long arg7,
9169                             abi_long arg8)
9170 {
9171     CPUState *cpu = env_cpu(cpu_env);
9172     abi_long ret;
9173 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9174     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9175     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9176     || defined(TARGET_NR_statx)
9177     struct stat st;
9178 #endif
9179 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9180     || defined(TARGET_NR_fstatfs)
9181     struct statfs stfs;
9182 #endif
9183     void *p;
9184 
9185     switch(num) {
9186     case TARGET_NR_exit:
9187         /* In old applications this may be used to implement _exit(2).
9188            However in threaded applications it is used for thread termination,
9189            and _exit_group is used for application termination.
9190            Do thread termination if we have more then one thread.  */
9191 
9192         if (block_signals()) {
9193             return -QEMU_ERESTARTSYS;
9194         }
9195 
9196         pthread_mutex_lock(&clone_lock);
9197 
9198         if (CPU_NEXT(first_cpu)) {
9199             TaskState *ts = cpu->opaque;
9200 
9201             if (ts->child_tidptr) {
9202                 put_user_u32(0, ts->child_tidptr);
9203                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9204                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9205             }
9206 
9207             object_unparent(OBJECT(cpu));
9208             object_unref(OBJECT(cpu));
9209             /*
9210              * At this point the CPU should be unrealized and removed
9211              * from cpu lists. We can clean-up the rest of the thread
9212              * data without the lock held.
9213              */
9214 
9215             pthread_mutex_unlock(&clone_lock);
9216 
9217             thread_cpu = NULL;
9218             g_free(ts);
9219             rcu_unregister_thread();
9220             pthread_exit(NULL);
9221         }
9222 
9223         pthread_mutex_unlock(&clone_lock);
9224         preexit_cleanup(cpu_env, arg1);
9225         _exit(arg1);
9226         return 0; /* avoid warning */
9227     case TARGET_NR_read:
9228         if (arg2 == 0 && arg3 == 0) {
9229             return get_errno(safe_read(arg1, 0, 0));
9230         } else {
9231             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9232                 return -TARGET_EFAULT;
9233             ret = get_errno(safe_read(arg1, p, arg3));
9234             if (ret >= 0 &&
9235                 fd_trans_host_to_target_data(arg1)) {
9236                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9237             }
9238             unlock_user(p, arg2, ret);
9239         }
9240         return ret;
9241     case TARGET_NR_write:
9242         if (arg2 == 0 && arg3 == 0) {
9243             return get_errno(safe_write(arg1, 0, 0));
9244         }
9245         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9246             return -TARGET_EFAULT;
9247         if (fd_trans_target_to_host_data(arg1)) {
9248             void *copy = g_malloc(arg3);
9249             memcpy(copy, p, arg3);
9250             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9251             if (ret >= 0) {
9252                 ret = get_errno(safe_write(arg1, copy, ret));
9253             }
9254             g_free(copy);
9255         } else {
9256             ret = get_errno(safe_write(arg1, p, arg3));
9257         }
9258         unlock_user(p, arg2, 0);
9259         return ret;
9260 
9261 #ifdef TARGET_NR_open
9262     case TARGET_NR_open:
9263         if (!(p = lock_user_string(arg1)))
9264             return -TARGET_EFAULT;
9265         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9266                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9267                                   arg3, true));
9268         fd_trans_unregister(ret);
9269         unlock_user(p, arg1, 0);
9270         return ret;
9271 #endif
9272     case TARGET_NR_openat:
9273         if (!(p = lock_user_string(arg2)))
9274             return -TARGET_EFAULT;
9275         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9276                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9277                                   arg4, true));
9278         fd_trans_unregister(ret);
9279         unlock_user(p, arg2, 0);
9280         return ret;
9281 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9282     case TARGET_NR_name_to_handle_at:
9283         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9284         return ret;
9285 #endif
9286 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9287     case TARGET_NR_open_by_handle_at:
9288         ret = do_open_by_handle_at(arg1, arg2, arg3);
9289         fd_trans_unregister(ret);
9290         return ret;
9291 #endif
9292 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9293     case TARGET_NR_pidfd_open:
9294         return get_errno(pidfd_open(arg1, arg2));
9295 #endif
9296 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9297     case TARGET_NR_pidfd_send_signal:
9298         {
9299             siginfo_t uinfo, *puinfo;
9300 
9301             if (arg3) {
9302                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9303                 if (!p) {
9304                     return -TARGET_EFAULT;
9305                  }
9306                  target_to_host_siginfo(&uinfo, p);
9307                  unlock_user(p, arg3, 0);
9308                  puinfo = &uinfo;
9309             } else {
9310                  puinfo = NULL;
9311             }
9312             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9313                                               puinfo, arg4));
9314         }
9315         return ret;
9316 #endif
9317 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9318     case TARGET_NR_pidfd_getfd:
9319         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9320 #endif
9321     case TARGET_NR_close:
9322         fd_trans_unregister(arg1);
9323         return get_errno(close(arg1));
9324 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9325     case TARGET_NR_close_range:
9326         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9327         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9328             abi_long fd, maxfd;
9329             maxfd = MIN(arg2, target_fd_max);
9330             for (fd = arg1; fd < maxfd; fd++) {
9331                 fd_trans_unregister(fd);
9332             }
9333         }
9334         return ret;
9335 #endif
9336 
9337     case TARGET_NR_brk:
9338         return do_brk(arg1);
9339 #ifdef TARGET_NR_fork
9340     case TARGET_NR_fork:
9341         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9342 #endif
9343 #ifdef TARGET_NR_waitpid
9344     case TARGET_NR_waitpid:
9345         {
9346             int status;
9347             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9348             if (!is_error(ret) && arg2 && ret
9349                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9350                 return -TARGET_EFAULT;
9351         }
9352         return ret;
9353 #endif
9354 #ifdef TARGET_NR_waitid
9355     case TARGET_NR_waitid:
9356         {
9357             siginfo_t info;
9358             info.si_pid = 0;
9359             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9360             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9361                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9362                     return -TARGET_EFAULT;
9363                 host_to_target_siginfo(p, &info);
9364                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9365             }
9366         }
9367         return ret;
9368 #endif
9369 #ifdef TARGET_NR_creat /* not on alpha */
9370     case TARGET_NR_creat:
9371         if (!(p = lock_user_string(arg1)))
9372             return -TARGET_EFAULT;
9373         ret = get_errno(creat(p, arg2));
9374         fd_trans_unregister(ret);
9375         unlock_user(p, arg1, 0);
9376         return ret;
9377 #endif
9378 #ifdef TARGET_NR_link
9379     case TARGET_NR_link:
9380         {
9381             void * p2;
9382             p = lock_user_string(arg1);
9383             p2 = lock_user_string(arg2);
9384             if (!p || !p2)
9385                 ret = -TARGET_EFAULT;
9386             else
9387                 ret = get_errno(link(p, p2));
9388             unlock_user(p2, arg2, 0);
9389             unlock_user(p, arg1, 0);
9390         }
9391         return ret;
9392 #endif
9393 #if defined(TARGET_NR_linkat)
9394     case TARGET_NR_linkat:
9395         {
9396             void * p2 = NULL;
9397             if (!arg2 || !arg4)
9398                 return -TARGET_EFAULT;
9399             p  = lock_user_string(arg2);
9400             p2 = lock_user_string(arg4);
9401             if (!p || !p2)
9402                 ret = -TARGET_EFAULT;
9403             else
9404                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9405             unlock_user(p, arg2, 0);
9406             unlock_user(p2, arg4, 0);
9407         }
9408         return ret;
9409 #endif
9410 #ifdef TARGET_NR_unlink
9411     case TARGET_NR_unlink:
9412         if (!(p = lock_user_string(arg1)))
9413             return -TARGET_EFAULT;
9414         ret = get_errno(unlink(p));
9415         unlock_user(p, arg1, 0);
9416         return ret;
9417 #endif
9418 #if defined(TARGET_NR_unlinkat)
9419     case TARGET_NR_unlinkat:
9420         if (!(p = lock_user_string(arg2)))
9421             return -TARGET_EFAULT;
9422         ret = get_errno(unlinkat(arg1, p, arg3));
9423         unlock_user(p, arg2, 0);
9424         return ret;
9425 #endif
9426     case TARGET_NR_execveat:
9427         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9428     case TARGET_NR_execve:
9429         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9430     case TARGET_NR_chdir:
9431         if (!(p = lock_user_string(arg1)))
9432             return -TARGET_EFAULT;
9433         ret = get_errno(chdir(p));
9434         unlock_user(p, arg1, 0);
9435         return ret;
9436 #ifdef TARGET_NR_time
9437     case TARGET_NR_time:
9438         {
9439             time_t host_time;
9440             ret = get_errno(time(&host_time));
9441             if (!is_error(ret)
9442                 && arg1
9443                 && put_user_sal(host_time, arg1))
9444                 return -TARGET_EFAULT;
9445         }
9446         return ret;
9447 #endif
9448 #ifdef TARGET_NR_mknod
9449     case TARGET_NR_mknod:
9450         if (!(p = lock_user_string(arg1)))
9451             return -TARGET_EFAULT;
9452         ret = get_errno(mknod(p, arg2, arg3));
9453         unlock_user(p, arg1, 0);
9454         return ret;
9455 #endif
9456 #if defined(TARGET_NR_mknodat)
9457     case TARGET_NR_mknodat:
9458         if (!(p = lock_user_string(arg2)))
9459             return -TARGET_EFAULT;
9460         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9461         unlock_user(p, arg2, 0);
9462         return ret;
9463 #endif
9464 #ifdef TARGET_NR_chmod
9465     case TARGET_NR_chmod:
9466         if (!(p = lock_user_string(arg1)))
9467             return -TARGET_EFAULT;
9468         ret = get_errno(chmod(p, arg2));
9469         unlock_user(p, arg1, 0);
9470         return ret;
9471 #endif
9472 #ifdef TARGET_NR_lseek
9473     case TARGET_NR_lseek:
9474         return get_errno(lseek(arg1, arg2, arg3));
9475 #endif
9476 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9477     /* Alpha specific */
9478     case TARGET_NR_getxpid:
9479         cpu_env->ir[IR_A4] = getppid();
9480         return get_errno(getpid());
9481 #endif
9482 #ifdef TARGET_NR_getpid
9483     case TARGET_NR_getpid:
9484         return get_errno(getpid());
9485 #endif
9486     case TARGET_NR_mount:
9487         {
9488             /* need to look at the data field */
9489             void *p2, *p3;
9490 
9491             if (arg1) {
9492                 p = lock_user_string(arg1);
9493                 if (!p) {
9494                     return -TARGET_EFAULT;
9495                 }
9496             } else {
9497                 p = NULL;
9498             }
9499 
9500             p2 = lock_user_string(arg2);
9501             if (!p2) {
9502                 if (arg1) {
9503                     unlock_user(p, arg1, 0);
9504                 }
9505                 return -TARGET_EFAULT;
9506             }
9507 
9508             if (arg3) {
9509                 p3 = lock_user_string(arg3);
9510                 if (!p3) {
9511                     if (arg1) {
9512                         unlock_user(p, arg1, 0);
9513                     }
9514                     unlock_user(p2, arg2, 0);
9515                     return -TARGET_EFAULT;
9516                 }
9517             } else {
9518                 p3 = NULL;
9519             }
9520 
9521             /* FIXME - arg5 should be locked, but it isn't clear how to
9522              * do that since it's not guaranteed to be a NULL-terminated
9523              * string.
9524              */
9525             if (!arg5) {
9526                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9527             } else {
9528                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9529             }
9530             ret = get_errno(ret);
9531 
9532             if (arg1) {
9533                 unlock_user(p, arg1, 0);
9534             }
9535             unlock_user(p2, arg2, 0);
9536             if (arg3) {
9537                 unlock_user(p3, arg3, 0);
9538             }
9539         }
9540         return ret;
9541 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9542 #if defined(TARGET_NR_umount)
9543     case TARGET_NR_umount:
9544 #endif
9545 #if defined(TARGET_NR_oldumount)
9546     case TARGET_NR_oldumount:
9547 #endif
9548         if (!(p = lock_user_string(arg1)))
9549             return -TARGET_EFAULT;
9550         ret = get_errno(umount(p));
9551         unlock_user(p, arg1, 0);
9552         return ret;
9553 #endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            /* move_mount(from_dirfd, from_path, to_dirfd, to_path, flags).
             * The two dirfds (arg1, arg3) and the flags (arg5) pass through
             * unchanged; only the two path strings need marshalling. */
            void *p2, *p4;

            /* The kernel requires both path pointers to be non-NULL. */
            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                /* Release the first string before bailing out. */
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);

            return ret;
        }
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            /* Only O_CLOEXEC needs translating, since its bit value can
             * differ between guest and host; all other flag bits in arg3
             * are passed through unchanged. */
            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* The guest passes a pointer to a seconds value; read it and
             * set CLOCK_REALTIME with zero nanoseconds. */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm(2) cannot fail; it returns the seconds remaining on any
         * previously set alarm, so no get_errno() wrapping is needed. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /* Emulate pause() by suspending on the task's current signal mask.
         * If block_signals() reports a signal already pending, skip the
         * wait; either way pause() always returns -EINTR to the guest. */
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            /* Convert the optional guest utimbuf (actime/modtime) to the
             * host layout, then call host utime() on the locked path. */
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                /* NULL times means "set to current time". */
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            /* Optional two-element timeval array: [0] = access time,
             * [1] = modification time; NULL means current time. */
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            /* Like utimes but relative to dirfd arg1; the timeval pair
             * moves to arg3 and the path to arg2. */
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's sysroot prefix redirection. */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies QEMU's sysroot prefix redirection; the mode
         * bits in arg2 pass through unchanged. */
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* The faccessat syscall takes no flags argument (unlike the libc
         * wrapper), so pass 0 for flags. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* faccessat2 carries a real flags argument (arg4), forwarded to
         * the host faccessat() wrapper. */
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        /* NOTE(review): glibc nice() can legitimately return -1 (the new
         * nice value) without setting errno, and errno is not cleared
         * before the call, so a stale errno could be misreported here as
         * a failure -- confirm. */
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) has no return value and cannot fail. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Signal numbers differ between guest and host ABIs. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            /* Lock both path strings before deciding; unlock_user() is
             * safe on a NULL host pointer, so both paths are released
             * unconditionally. */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* renameat(olddirfd, oldpath, newdirfd, newpath); dirfds
             * arg1/arg3 pass through, paths arg2/arg4 are marshalled. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /* As renameat, plus a flags argument (arg5) forwarded to the
             * raw renameat2 syscall wrapper. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* mkdir(path, mode): only the path needs marshalling. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* mkdirat(dirfd, path, mode). */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Copy any fd translator state onto the new descriptor. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        /* do_pipe() writes the two fds back into guest memory at arg1. */
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2 adds flags (arg2), translated to host fcntl bits. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            /* Fill the guest's struct tms if a buffer was supplied, then
             * convert the syscall's clock_t return to the target's
             * clock-tick units as well. */
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* A NULL filename disables process accounting, so it is passed
         * through without marshalling. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's sysroot prefix redirection. */
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        /* umount2(path, flags); the flags (arg2) pass through unchanged. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* ioctl marshalling is complex; fully delegated to do_ioctl(). */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Copy any fd translator state from arg1 onto arg2. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        /*
         * dup3(oldfd, newfd, flags): like dup2 but with a flags argument.
         * The only flag Linux accepts is O_CLOEXEC; anything else is
         * rejected with EINVAL.
         */
        int host_flags;

        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            /* Return the target's errno encoding, consistent with every
             * other error path in this switch (was host -EINVAL, which is
             * wrong for targets whose errno values differ). */
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Copy any fd translator state from arg1 onto arg2. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Old-style sigaction.  MIPS uses a different target layout:
             * its old sigaction carries a full 4-word sa_mask, whereas the
             * generic old layout has a single-word mask. */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                /* Old-style sigaction only reports the first mask word. */
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel rejects any sigset size other than its own. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                /* Fall through so that 'act' still gets unlocked below. */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Return the current signal mask directly as the syscall's
             * return value, in old (single-word) sigset format. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Replace the whole signal mask with arg1 (old single-word
             * format) and return the previous mask on success. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /* Old-style sigprocmask, using single-word sigsets.  Alpha's
             * variant passes the mask by value in arg2 and returns the
             * old mask in the syscall result. */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic variant: arg2/arg3 are guest pointers to the new
             * and old sigsets respectively; either may be NULL. */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* With no new set, 'how' is ignored: query only. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The kernel rejects any sigset size other than its own. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* With no new set, 'how' is ignored: query only. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: write the pending set back in the
             * old (single-word) sigset format. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value in arg1 rather than via a
             * guest pointer. */
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            /* Restore the task's suspend-mask bookkeeping after waking. */
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            /* arg2 is the guest's sigsetsize; validated by the helper. */
            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            /* Restore the task's suspend-mask bookkeeping after waking. */
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /* Wait for a signal in 'set' with an optional timeout (arg3);
             * on success, write the siginfo back (arg2) and convert the
             * delivered signal number to the target's numbering. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout means wait indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Identical to rt_sigtimedwait except the timeout uses the
             * 64-bit time layout (target_to_host_timespec64). */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout means wait indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* Convert the guest siginfo and signal number, then queue the
             * signal to process arg1. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Thread-directed variant: tgid in arg1, tid in arg2, signal
             * in arg3, siginfo pointer in arg4. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Restart if another signal is pending so the frame is restored
         * atomically with respect to signal delivery. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        /* arg2 is the explicit name length; the string need not be
         * NUL-terminated but lock_user_string still bounds it safely. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            /* Convert the guest's resource id and rlimit values to host
             * representation before applying. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* Query the host limit and convert both fields back to the
             * guest's rlimit representation. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            /* host_to_target_rusage() handles the guest-side copy-out
             * and returns its own error code. */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /* Either output pointer may be NULL; copy out only the ones
             * the guest asked for. */
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /* Either input pointer may be NULL; convert only the ones
             * the guest supplied. */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
10442 #if defined(TARGET_NR_select)
10443     case TARGET_NR_select:
10444 #if defined(TARGET_WANT_NI_OLD_SELECT)
10445         /* some architectures used to have old_select here
10446          * but now ENOSYS it.
10447          */
10448         ret = -TARGET_ENOSYS;
10449 #elif defined(TARGET_WANT_OLD_SYS_SELECT)
10450         ret = do_old_select(arg1);
10451 #else
10452         ret = do_select(arg1, arg2, arg3, arg4, arg5);
10453 #endif
10454         return ret;
10455 #endif
10456 #ifdef TARGET_NR_pselect6
10457     case TARGET_NR_pselect6:
10458         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
10459 #endif
10460 #ifdef TARGET_NR_pselect6_time64
10461     case TARGET_NR_pselect6_time64:
10462         return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
10463 #endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        /* symlink(2): lock both guest path strings before the host call. */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        /* symlinkat(2): target path is arg1, dirfd arg2, linkpath arg3. */
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        /* readlink(2), routed through do_guest_readlink (which also
         * handles /proc self-links). */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            /* NOTE(review): unlike the readlinkat arm below, p/p2 are not
             * checked for NULL here before the call; presumably
             * do_guest_readlink copes with NULL on lock_user failure --
             * verify against its definition. */
            ret = get_errno(do_guest_readlink(p, p2, arg3));
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        /* readlinkat(2), with a special case for /proc/self/exe so the
         * guest sees the emulated binary's path rather than QEMU's. */
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* swapon(2): path string in arg1, flags in arg2. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* reboot(2): arg4 is a command string only for RESTART2. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
        /* mmap(2): on the listed 32-bit/legacy targets the old ABI packs
         * all six arguments into a guest-memory array pointed to by arg1;
         * everywhere else they arrive in registers as usual. */
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            /* Arguments are guest-endian in memory; byte-swap each one. */
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2(2): arg6 is the file offset in MMAP_SHIFT-sized units
         * (4 KiB pages by default), widened to off_t before shifting. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Drop the flag and widen the range to cover the whole
                 * region from the stack limit up to the requested end. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* Operates directly on the host address via g2h translation. */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        /* truncate(2): path string in arg1, length in arg2. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* fchmodat(2): flags argument passed to the host as 0. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        /* statfs(2): fill the shared host 'stfs' buffer, then fall into
         * convert_statfs, which is also the goto target of the fstatfs
         * arm below -- keep label placement and arg2 usage in sync. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            /* Guest output buffer is arg2 for both statfs and fstatfs. */
            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        /* statfs64(2): same pattern as statfs above, but the guest output
         * buffer is arg3 (arg2 is the guest-side structure size). */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket syscall used by some targets; do_socketcall
         * demultiplexes into the individual do_* helpers below. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept(2) is accept4(2) with flags == 0. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv(2) == recvfrom(2) with NULL address arguments. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag: 0 = receive, 1 = send. */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send(2) == sendto(2) with NULL address arguments. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* getrandom(2): only available when the host also provides it;
         * the unlock length is ret, so only bytes actually produced are
         * marked dirty. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        /* syslog(2): non-read actions pass no buffer; read actions copy
         * kernel log data out to the guest buffer. */
        {
            /*
             * NOTE(review): 'len' is initialized from arg2, but the calls
             * below use arg2 as the buffer pointer and arg3 as the length
             * (sys_syslog(arg1, p, arg3) / lock_user(..., arg2, arg3, ...)).
             * The len<0 / len==0 checks therefore appear to test the
             * truncated buffer-pointer value, not the length -- verify
             * against the kernel's do_syslog() argument convention.
             */
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        /* setitimer(2): new value (arg2) is optional; the old value is
         * always fetched from the host and copied out if arg3 is set.
         * Each itimerval is two consecutive target_timevals in guest
         * memory (it_interval then it_value). */
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        /* getitimer(2): same two-timeval guest layout as setitimer. */
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        /* stat(2): host call fills the shared 'st', then jumps to the
         * common conversion code (do_stat label) in the fstat arm. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Shared host->target struct stat conversion; arg2 is the
             * guest buffer for all three callers. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond fields only when both host and target have
                 * them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: shift the argument registers down by one and
         * recurse with arg1 (masked to 16 bits) as the syscall number. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        /* wait4(2): both the status word (arg2) and rusage (arg4) outputs
         * are optional. */
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Status is only written back for a nonzero pid result. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        /* swapoff(2): path string in arg1. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        /* sysinfo(2): field-by-field copy of the host struct into the
         * guest's target_sysinfo layout; output is skipped if arg1 is 0. */
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC syscall; do_ipc demultiplexes into the
         * individual sem/msg/shm helpers below. */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop(2) == semtimedop(2) with no timeout; final flag selects
         * 64-bit time_t handling. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        /* BACKWARDS2 also swaps the first two arguments (sp, flags). */
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* exit_group(2): run QEMU-side cleanup (gdbstub, plugins, ...)
         * before terminating all threads. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
11163 #ifdef TARGET_I386
11164     case TARGET_NR_modify_ldt:
11165         return do_modify_ldt(cpu_env, arg1, arg2, arg3);
11166 #if !defined(TARGET_X86_64)
11167     case TARGET_NR_vm86:
11168         return do_vm86(cpu_env, arg1, arg2);
11169 #endif
11170 #endif
11171 #if defined(TARGET_NR_adjtimex)
11172     case TARGET_NR_adjtimex:
11173         {
11174             struct timex host_buf;
11175 
11176             if (target_to_host_timex(&host_buf, arg1) != 0) {
11177                 return -TARGET_EFAULT;
11178             }
11179             ret = get_errno(adjtimex(&host_buf));
11180             if (!is_error(ret)) {
11181                 if (host_to_target_timex(arg1, &host_buf) != 0) {
11182                     return -TARGET_EFAULT;
11183                 }
11184             }
11185         }
11186         return ret;
11187 #endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;

            /* arg1 is the clockid, arg2 points at the guest timex. */
            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            /* On success, write the updated timex back to the guest. */
            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Same as clock_adjtime, but the guest struct uses 64-bit time. */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            /* On success, write the updated timex back to the guest. */
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    /* Straight pass-through syscalls: single scalar argument, no
     * guest-memory marshalling needed. */
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host lacks _llseek (64-bit hosts): emulate with plain lseek,
             * reassembling the 64-bit offset from the two 32-bit halves. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success, store the resulting 64-bit offset at arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    /* Directory-entry reads; dirent layout conversion lives in the helpers. */
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    /* All three poll variants share do_ppoll(); the two bools select
     * ppoll semantics and 64-bit time_t respectively. */
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* Map the guest iovec array; copy-out (last arg 1) on unlock. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed; report the host errno it left set. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            /* Copy-in on lock (last arg 1); nothing to copy back out. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            /* Map the guest iovec array; copy-out (last arg 1) on unlock. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Split the 64-bit offset into the low/high host words the
                 * preadv syscall ABI expects. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            /* Copy-in on lock (last arg 1); nothing to copy back out. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                /* Split the 64-bit offset into the low/high host words the
                 * pwritev syscall ABI expects. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest's byte count up to a whole host ulong. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                /* Convert the host cpu mask into the guest's layout. */
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest's byte count up to a whole host ulong. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Read and convert the guest cpu mask into host layout. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Only request the values the guest actually asked for
             * (either pointer may be NULL). */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            /* The kernel rejects a NULL param pointer with EINVAL. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            /* The kernel rejects a NULL param pointer with EINVAL. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap the priority into guest endianness. */
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            /* The kernel rejects a NULL param pointer with EINVAL. */
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Clamp the caller's size to what we can hold. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                /* NOTE(review): only arg3 bytes are locked here, but all
                 * fields below are written unconditionally; presumably the
                 * kernel never reports scha.size > arg3 — confirm. */
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* util_min/util_max exist only in the v1 (larger) struct. */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* The first u32 of the struct is its self-declared size. */
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                /* size == 0 means the original (pre-util-clamp) struct. */
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                /* Too small to be any known version: report the size we
                 * expect and fail, mirroring the kernel's E2BIG protocol. */
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* A larger-than-known struct is acceptable only if the extra
             * trailing bytes are all zero. */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            /* util_min/util_max are present only in the larger struct. */
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    /* No-argument / scalar-only scheduler syscalls. */
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Copy the interval out in the guest's 32-bit timespec. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same, but with the 64-bit time_t guest layout. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* Fail with EFAULT if the request timespec cannot be read,
             * rather than sleeping on uninitialized data. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                /* Best-effort write-back of the remaining time when the
                 * sleep was interrupted. */
                host_to_target_timespec(arg2, &rem);
            }
        }
        return ret;
#endif
11586     case TARGET_NR_prctl:
11587         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11588         break;
11589 #ifdef TARGET_NR_arch_prctl
11590     case TARGET_NR_arch_prctl:
11591         return do_arch_prctl(cpu_env, arg1, arg2);
11592 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit values in aligned register pairs, which
         * shifts the offset halves up by one argument slot. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* Copy back only the bytes the syscall filled in. */
        unlock_user(p, arg1, ret);
        return ret;
    /* capget and capset share almost all their marshalling, so they are
     * handled by one body keyed on 'num'. */
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* VERIFY_WRITE because the kernel writes back header.version for
         * both capget and capset; copy-in (last arg 1) reads the guest's. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        /* arg2 may legitimately be NULL (e.g. version probing). */
        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        /* The offset pointer is optional; only marshal it if present. */
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        /* On success the kernel advanced *offp; write it back. */
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        /* Same as sendfile, but the guest offset is always 64-bit. */
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    /* vfork is emulated as a fork with the vfork clone flags. */
    case TARGET_NR_vfork:
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        /* Indentation normalized from tabs to the file's 4-space style;
         * no functional change. */
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            /* Convert host rlim values (incl. RLIM_INFINITY) for the guest. */
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* Helper reassembles the 64-bit length from the register args.
         * (Indentation normalized from a tab to spaces.) */
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies the -L sysroot prefix remapping. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Anything other than ENOSYS is final; on ENOSYS fall
                 * through to the fstatat-based emulation below. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: synthesize a statx result from plain fstatat. */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: fields stat cannot provide stay 0. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* low2high*/high2low* translate between the guest's 16-bit legacy
         * uid/gid ABI and the host's full-width ids. */
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 is a size query: pass a NULL list through. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 clears the supplementary list: NULL is fine. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        /*
         * 16-bit uid/gid variant: low2high*() widens the target value,
         * mapping the 16-bit -1 "leave unchanged" sentinel to the
         * host's full-width -1.
         */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* arg1 = dirfd, arg2 = pathname, arg5 = flags (AT_*). */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* sys_setresuid() wrapper applies the change process-wide. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Narrow each id to the target width; fault if any of
                 * the three output pointers is bad.  */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    /*
     * Set real, effective and saved gids.  Guard fixed: this case was
     * previously wrapped in "#ifdef TARGET_NR_getresgid", so a target
     * defining setresgid but not getresgid would silently lose the
     * syscall (and the reverse would not compile).  low2highgid() maps
     * the target's 16-bit -1 sentinel to the host full-width -1.
     */
    case TARGET_NR_setresgid:
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Narrow each gid to the target width; fault on any
                 * bad output pointer.  */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid never fail; they return the previous id. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
12049 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit id variant: no low2high widening needed. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
12062 
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            /* Alpha returns a second value in a4: the effective uid
             * alongside the real uid in v0 (the normal return). */
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        {
            /* Fixed type: this is a group id, so gid_t (was uid_t). */
            gid_t egid = getegid();
            /* Alpha returns a second value in a4: the effective gid
             * alongside the real gid in v0 (the normal return). */
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /*
                 * Refresh the SWCR status bits from the hardware FPCR
                 * before reporting (status is kept only in FPCR; see
                 * the matching comment in osf_setsysinfo).
                 */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Preserve only the dynamic rounding mode field, then
                 * fold the guest's SWCR settings into the FPCR.  */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Later tests override earlier ones, so the last
                     * matching enabled trap picks the si_code.  */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Deliver SIGFPE to the guest for the newly raised,
                     * trap-enabled exceptions.  */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* The mask is passed by value (old-style sigset), not via
             * a guest pointer.  */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* On success the previous mask is the return value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
12237 
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        /* 32-bit id variants pass ids straight through. */
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 just queries the count; host getgroups()
             * accepts NULL then.  */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups stored; ids are full
                 * 32-bit here, so only a byte-swap is needed.  */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* Empty list clears supplementary groups; setgroups()
             * accepts NULL with size 0.  */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                /* Read-only lock: nothing to copy back, hence len 0. */
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit id variants: ids pass through without widening. */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Fault if any of the three output pointers is bad. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* ENOMEM (not EFAULT) if the queried range is not mapped,
             * matching the kernel's error for mincore().  */
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /*
             * NOTE(review): the result vector at arg3 is an output
             * buffer (one byte per page) that mincore() writes, yet it
             * is locked with lock_user_string(), which validates a
             * NUL-terminated string for reading.  This happens to work
             * when guest memory is directly addressable, but looks
             * wrong in the bounce-buffer case -- confirm.
             */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        /* posix_fadvise() returns the error directly rather than
         * via errno, hence no get_errno() here.  */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        return -host_to_target_errno(ret);
#endif
12413 
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the arguments into the canonical
         * fd, offset, len, advice order expected below.  */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        /* posix_fadvise() returns the error directly, not via errno. */
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants; remap to host values. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
12476 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* Delegated: target_madvise() handles page-size translation. */
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * 64-bit file locking on 32-bit ABIs.  The *LK64 commands need
         * struct flock64 converted between guest and host layouts; all
         * other commands go through the generic do_fcntl() path.
         * (Whitespace fixed: two break statements were tab-indented,
         * against QEMU coding style.)
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* ARM OABI lays out struct flock64 differently from EABI. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* GETLK writes the conflicting lock back to the guest. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* On 32-bit ABIs the 64-bit offset arrives in a register pair,
         * possibly shifted up one slot for alignment.  */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        /* Local 'p' shadows the outer one; 'b' is the (optional)
         * output buffer -- a NULL buffer queries the required size. */
        void *p, *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        /* unlock_user() tolerates NULL, so these are safe on the
         * error paths too.  */
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* p = path, n = attribute name, v = optional value buffer
             * (a zero-length value is legal, hence the arg3 check). */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user() tolerates NULL, so all three are safe. */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            /* v is the optional output buffer; NULL (arg3 == 0)
             * queries the attribute's size.  */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy the (up to arg4 bytes of) value back to the guest. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            /* p = path, n = attribute name. */
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the TLS value to be 256-byte aligned. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          cpu_env->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k stores the TLS pointer in the per-task state. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            /* m68k returns the TLS pointer directly as the result. */
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Deliberately unimplemented; glibc falls back to uname(). */
        return -TARGET_ENOSYS;
#endif
12757 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec; EFAULT propagates via ret. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* 64-bit time_t variant for 32-bit guests (y2038-safe). */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Conversion failure (bad arg2) yields -TARGET_EFAULT. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        /* 64-bit time_t variant for 32-bit guests (y2038-safe). */
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): conversion result deliberately ignored --
             * presumably because arg2 may be NULL for clock_getres
             * (the kernel allows a NULL res pointer); confirm.  */
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        /* 64-bit time_t variant for 32-bit guests. */
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* 'ts' doubles as the remaining-time output when arg4 is set. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* 64-bit time_t variant; same EINTR/remainder semantics as
         * clock_nanosleep above.  */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12867 
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        /* Record the clear_child_tid pointer in QEMU's per-thread state;
         * the thread-exit path uses it instead of the host kernel. */
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    /* Signal numbers are translated from target to host numbering;
     * thread/process ids pass straight through. */
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12902 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /*
             * args: dirfd, guest pathname (may be NULL), guest
             * struct timespec[2] (may be NULL => "set to current time"),
             * flags.
             */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* Style fix: brace all control bodies per QEMU coding style. */
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /*
             * Same as utimensat but reads 64-bit guest timespecs.
             * args: dirfd, guest pathname (may be NULL), guest
             * __kernel_timespec[2] (may be NULL), flags.
             */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* Style fix: brace all control bodies per QEMU coding style. */
            if (!arg2) {
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            } else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* 32-bit-time futex; do_futex() decodes the op and timeout. */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        /* 64-bit-time variant of the above. */
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            /* Register a translator so events read from this fd are
             * converted to the target's inotify_event layout. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* Flags (O_NONBLOCK/O_CLOEXEC style bits) need bitmask translation. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* args: inotify fd, guest pathname, event mask. */
        p = lock_user_string(arg2);
        if (!p) {
            /* Bug fix: a bad guest pointer used to be passed straight
             * through to inotify_add_watch() instead of faulting. */
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Watch descriptors and fds need no translation. */
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
12998 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* args: guest queue name, open flags, mode, guest mq_attr
             * pointer (may be NULL). */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the string is locked at arg1 - 1 but unlocked
             * at arg1 -- this off-by-one looks suspicious; verify against
             * the target ABI before relying on it. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* Remove a POSIX message queue by name.
         * NOTE(review): same arg1 - 1 oddity as mq_open above. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
13031 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /* args: mqd, guest msg buffer, msg len, priority, guest
             * absolute timeout (may be NULL => block indefinitely).
             * NOTE(review): the lock_user() result is not checked for
             * NULL, and the early -TARGET_EFAULT returns below leave
             * the buffer locked -- verify whether that matters here. */
            struct timespec ts;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* Same as mq_timedsend but with a 64-bit guest timespec. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /* args: mqd, guest buffer, buffer len, guest priority out
             * pointer (may be NULL), guest absolute timeout (may be NULL).
             * NOTE(review): same unchecked lock_user() / locked-buffer
             * leak pattern as mq_timedsend above. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* NOTE(review): put_user_u32() failure is silently ignored. */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* Same as mq_timedreceive but with a 64-bit guest timespec. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif
13128 
13129     /* Not implemented for now... */
13130 /*     case TARGET_NR_mq_notify: */
13131 /*         break; */
13132 
13133     case TARGET_NR_mq_getsetattr:
13134         {
13135             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13136             ret = 0;
13137             if (arg2 != 0) {
13138                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13139                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13140                                            &posix_mq_attr_out));
13141             } else if (arg3 != 0) {
13142                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13143             }
13144             if (ret == 0 && arg3 != 0) {
13145                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13146             }
13147         }
13148         return ret;
13149 #endif
13150 
13151 #ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        /*
         * args: fd_in, fd_out, len, flags -- pass straight through; file
         * descriptors and the SPLICE_F_* bits are host == target here.
         */
        return get_errno(tee(arg1, arg2, arg3, arg4));
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* args: fd_in, guest off_in ptr (may be NULL), fd_out,
             * guest off_out ptr (may be NULL), len, flags. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Copy back the offsets the host may have advanced. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* Translate the guest iovec before splicing it into the pipe. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() failed and left the cause in errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
13203 #endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register the fd translator so the eventfd's 64-bit counter
             * reads/writes are byte-swapped for the target if needed. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Translate only the target's O_NONBLOCK/O_CLOEXEC bits by hand;
         * all other flag bits are passed through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
    /* On 32-bit ABIs the two 64-bit offsets arrive split across
     * register pairs and must be reassembled with target_offset64(). */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* 32-bit MIPS aligns 64-bit register pairs, shifting the args. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* do_signalfd4() translates the guest sigmask and flags. */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd: same helper with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is only a hint; pass it straight through. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* args: epfd, op, fd, guest epoll_event pointer (ignored for
         * EPOLL_CTL_DEL but may still need to be non-NULL; see below). */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
13322 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: collect events into a host-side array,
         * then convert each one into the guest's epoll_event layout. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        /* Bound maxevents so the temporary allocation stays sane. */
        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            /* epoll_pwait additionally swaps in a guest sigmask. */
            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; convert each to target. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* New limits for AS/DATA/STACK are deliberately not forwarded
         * (rnewp stays NULL) -- presumably to keep the guest from
         * constraining QEMU's own address space; confirm before changing. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Copy the host's hostname into a guest buffer of arg2 bytes. */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);

        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* args: new value, expected old value, ..., guest address (arg6).
         * Compare-and-swap emulated without exclusivity. */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Faulting read: raise SIGSEGV on the guest.
             * NOTE(review): execution falls through and compares/returns
             * mem_value, which is uninitialized here -- verify intended. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            ret = 0xdeadbeef;

        }
        if (mem_value == arg2)
            put_user_u32(arg1, arg6);
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
13471 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Reserve a slot in QEMU's host timer table. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* Conversion failed: release the reserved slot. */
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                /* Hand the guest a magic-tagged slot index, not the raw
                 * host timer_t; get_timer_id() reverses this encoding. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
13510 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already produced a target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Optionally report the previous timer setting to the guest. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as timer_settime but marshals 64-bit guest itimerspecs. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13564 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already produced a target errno. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Bug fix: only copy out on success -- hspec is uninitialized
             * if timer_gettime() failed. */
            if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13587 
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Bug fix: only copy out on success -- hspec is uninitialized
             * if timer_gettime() failed. */
            if (!is_error(ret) && host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13610 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid -- report the timer's overrun count. */
        target_timer_t tid = get_timer_id(arg1);

        if (tid < 0) {
            /* get_timer_id() already produced a target errno. */
            return tid;
        }
        return get_errno(timer_getoverrun(g_posix_timers[tid]));
    }
#endif
13626 
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Return the slot to QEMU's host timer table. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Flags need bitmask translation (TFD_NONBLOCK/TFD_CLOEXEC share
         * the fcntl flag encoding). */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif
13653 
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            /* args: timerfd, guest itimerspec out pointer. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* Same as timerfd_gettime but writes a 64-bit itimerspec. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13681 
13682 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13683     case TARGET_NR_timerfd_settime:
13684         {
13685             struct itimerspec its_new, its_old, *p_new;
13686 
13687             if (arg3) {
13688                 if (target_to_host_itimerspec(&its_new, arg3)) {
13689                     return -TARGET_EFAULT;
13690                 }
13691                 p_new = &its_new;
13692             } else {
13693                 p_new = NULL;
13694             }
13695 
13696             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13697 
13698             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13699                 return -TARGET_EFAULT;
13700             }
13701         }
13702         return ret;
13703 #endif
13704 
13705 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13706     case TARGET_NR_timerfd_settime64:
13707         {
13708             struct itimerspec its_new, its_old, *p_new;
13709 
13710             if (arg3) {
13711                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13712                     return -TARGET_EFAULT;
13713                 }
13714                 p_new = &its_new;
13715             } else {
13716                 p_new = NULL;
13717             }
13718 
13719             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13720 
13721             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13722                 return -TARGET_EFAULT;
13723             }
13724         }
13725         return ret;
13726 #endif
13727 
13728 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13729     case TARGET_NR_ioprio_get:
13730         return get_errno(ioprio_get(arg1, arg2));
13731 #endif
13732 
13733 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13734     case TARGET_NR_ioprio_set:
13735         return get_errno(ioprio_set(arg1, arg2, arg3));
13736 #endif
13737 
13738 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13739     case TARGET_NR_setns:
13740         return get_errno(setns(arg1, arg2));
13741 #endif
13742 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13743     case TARGET_NR_unshare:
13744         return get_errno(unshare(arg1));
13745 #endif
13746 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13747     case TARGET_NR_kcmp:
13748         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13749 #endif
13750 #ifdef TARGET_NR_swapcontext
13751     case TARGET_NR_swapcontext:
13752         /* PowerPC specific.  */
13753         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13754 #endif
13755 #ifdef TARGET_NR_memfd_create
13756     case TARGET_NR_memfd_create:
13757         p = lock_user_string(arg1);
13758         if (!p) {
13759             return -TARGET_EFAULT;
13760         }
13761         ret = get_errno(memfd_create(p, arg2));
13762         fd_trans_unregister(ret);
13763         unlock_user(p, arg1, 0);
13764         return ret;
13765 #endif
13766 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13767     case TARGET_NR_membarrier:
13768         return get_errno(membarrier(arg1, arg2));
13769 #endif
13770 
13771 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13772     case TARGET_NR_copy_file_range:
13773         {
13774             loff_t inoff, outoff;
13775             loff_t *pinoff = NULL, *poutoff = NULL;
13776 
13777             if (arg2) {
13778                 if (get_user_u64(inoff, arg2)) {
13779                     return -TARGET_EFAULT;
13780                 }
13781                 pinoff = &inoff;
13782             }
13783             if (arg4) {
13784                 if (get_user_u64(outoff, arg4)) {
13785                     return -TARGET_EFAULT;
13786                 }
13787                 poutoff = &outoff;
13788             }
13789             /* Do not sign-extend the count parameter. */
13790             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13791                                                  (abi_ulong)arg5, arg6));
13792             if (!is_error(ret) && ret > 0) {
13793                 if (arg2) {
13794                     if (put_user_u64(inoff, arg2)) {
13795                         return -TARGET_EFAULT;
13796                     }
13797                 }
13798                 if (arg4) {
13799                     if (put_user_u64(outoff, arg4)) {
13800                         return -TARGET_EFAULT;
13801                     }
13802                 }
13803             }
13804         }
13805         return ret;
13806 #endif
13807 
13808 #if defined(TARGET_NR_pivot_root)
13809     case TARGET_NR_pivot_root:
13810         {
13811             void *p2;
13812             p = lock_user_string(arg1); /* new_root */
13813             p2 = lock_user_string(arg2); /* put_old */
13814             if (!p || !p2) {
13815                 ret = -TARGET_EFAULT;
13816             } else {
13817                 ret = get_errno(pivot_root(p, p2));
13818             }
13819             unlock_user(p2, arg2, 0);
13820             unlock_user(p, arg1, 0);
13821         }
13822         return ret;
13823 #endif
13824 
13825 #if defined(TARGET_NR_riscv_hwprobe)
13826     case TARGET_NR_riscv_hwprobe:
13827         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13828 #endif
13829 
13830     default:
13831         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13832         return -TARGET_ENOSYS;
13833     }
13834     return ret;
13835 }
13836 
13837 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13838                     abi_long arg2, abi_long arg3, abi_long arg4,
13839                     abi_long arg5, abi_long arg6, abi_long arg7,
13840                     abi_long arg8)
13841 {
13842     CPUState *cpu = env_cpu(cpu_env);
13843     abi_long ret;
13844 
13845 #ifdef DEBUG_ERESTARTSYS
13846     /* Debug-only code for exercising the syscall-restart code paths
13847      * in the per-architecture cpu main loops: restart every syscall
13848      * the guest makes once before letting it through.
13849      */
13850     {
13851         static bool flag;
13852         flag = !flag;
13853         if (flag) {
13854             return -QEMU_ERESTARTSYS;
13855         }
13856     }
13857 #endif
13858 
13859     record_syscall_start(cpu, num, arg1,
13860                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13861 
13862     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13863         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13864     }
13865 
13866     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13867                       arg5, arg6, arg7, arg8);
13868 
13869     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13870         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13871                           arg3, arg4, arg5, arg6);
13872     }
13873 
13874     record_syscall_return(cpu, num, ret);
13875     return ret;
13876 }
13877