xref: /openbmc/qemu/linux-user/syscall.c (revision 9ab8d071)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc */
/* Mirrors the kernel's struct sched_attr used by sched_setattr(2)/
 * sched_getattr(2); field meanings per the sched_setattr(2) man page. */
struct sched_attr {
    uint32_t size;             /* size of this structure, in bytes */
    uint32_t sched_policy;     /* scheduling policy (SCHED_*) */
    uint64_t sched_flags;      /* SCHED_FLAG_* bits */
    int32_t sched_nice;        /* nice value (SCHED_OTHER, SCHED_BATCH) */
    uint32_t sched_priority;   /* static priority (SCHED_FIFO, SCHED_RR) */
    uint64_t sched_runtime;    /* SCHED_DEADLINE runtime, nanoseconds */
    uint64_t sched_deadline;   /* SCHED_DEADLINE deadline, nanoseconds */
    uint64_t sched_period;     /* SCHED_DEADLINE period, nanoseconds */
    uint32_t sched_util_min;   /* utilization clamp minimum */
    uint32_t sched_util_max;   /* utilization clamp maximum */
};
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
/*
 * Translation table between guest (TARGET_*) and host open(2)/fcntl(2)
 * flag encodings.  Each row appears to be
 * { target_mask, target_bits, host_mask, host_bits } — see the
 * bitmask_transtbl helpers for the exact matching semantics.
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  /* O_DSYNC is distinguished from O_SYNC by masking with the wider
   * O_SYNC pattern, hence the shared mask on these two rows. */
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
/*
 * Fallback for hosts whose kernel lacks utimensat: always fail with
 * ENOSYS, exactly as the missing syscall would.
 */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    /* All parameters are intentionally unused in this stub. */
    (void)dirfd;
    (void)pathname;
    (void)times;
    (void)flags;

    errno = ENOSYS;
    return -1;
}
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
/*
 * Fallback for hosts without the renameat2 syscall: a zero-flag call
 * degrades cleanly to plain renameat(); any flag bits cannot be
 * honoured and are reported as ENOSYS, as the real syscall's absence
 * would be.
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
/* Raw 64-bit rlimit layout passed straight to the prlimit64 syscall. */
struct host_rlimit64 {
    uint64_t rlim_cur;   /* soft limit */
    uint64_t rlim_max;   /* hard limit (ceiling for rlim_cur) */
};
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544 
/*
 * Translate a host errno value into the guest's numbering.
 * errnos.c.inc expands E(X) into one "case X: return TARGET_X;" arm
 * per errno QEMU knows about; anything unlisted passes through
 * unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
555 
/*
 * Inverse of host_to_target_errno(): map a guest errno value back to
 * the host's numbering, using the same E(X) table from errnos.c.inc.
 * Unknown values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764  *  use the flock64 struct rather than unsuffixed flock
765  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766  */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772 
773 static inline int host_to_target_sock_type(int host_type)
774 {
775     int target_type;
776 
777     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778     case SOCK_DGRAM:
779         target_type = TARGET_SOCK_DGRAM;
780         break;
781     case SOCK_STREAM:
782         target_type = TARGET_SOCK_STREAM;
783         break;
784     default:
785         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786         break;
787     }
788 
789 #if defined(SOCK_CLOEXEC)
790     if (host_type & SOCK_CLOEXEC) {
791         target_type |= TARGET_SOCK_CLOEXEC;
792     }
793 #endif
794 
795 #if defined(SOCK_NONBLOCK)
796     if (host_type & SOCK_NONBLOCK) {
797         target_type |= TARGET_SOCK_NONBLOCK;
798     }
799 #endif
800 
801     return target_type;
802 }
803 
804 static abi_ulong target_brk, initial_target_brk;
805 
806 void target_set_brk(abi_ulong new_brk)
807 {
808     target_brk = TARGET_PAGE_ALIGN(new_brk);
809     initial_target_brk = target_brk;
810 }
811 
812 /* do_brk() must return target values and target errnos. */
/* do_brk() must return target values and target errnos. */
/*
 * Emulate brk(2) for the guest: grow or shrink the heap to brk_val and
 * return the resulting break.  On failure most targets return the old
 * break unchanged (Linux brk semantics); Alpha returns -TARGET_ENOMEM.
 */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);

        target_brk = brk_val;
        return target_brk;
    }

    /* Growing: MAP_FIXED_NOREPLACE makes the mmap fail rather than
     * clobber any existing mapping in the requested range. */
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    /* Success only if the new pages landed exactly at the old break. */
    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
861 
862 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
863     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
864 static inline abi_long copy_from_user_fdset(fd_set *fds,
865                                             abi_ulong target_fds_addr,
866                                             int n)
867 {
868     int i, nw, j, k;
869     abi_ulong b, *target_fds;
870 
871     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
872     if (!(target_fds = lock_user(VERIFY_READ,
873                                  target_fds_addr,
874                                  sizeof(abi_ulong) * nw,
875                                  1)))
876         return -TARGET_EFAULT;
877 
878     FD_ZERO(fds);
879     k = 0;
880     for (i = 0; i < nw; i++) {
881         /* grab the abi_ulong */
882         __get_user(b, &target_fds[i]);
883         for (j = 0; j < TARGET_ABI_BITS; j++) {
884             /* check the bit inside the abi_ulong */
885             if ((b >> j) & 1)
886                 FD_SET(k, fds);
887             k++;
888         }
889     }
890 
891     unlock_user(target_fds, target_fds_addr, 0);
892 
893     return 0;
894 }
895 
896 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
897                                                  abi_ulong target_fds_addr,
898                                                  int n)
899 {
900     if (target_fds_addr) {
901         if (copy_from_user_fdset(fds, target_fds_addr, n))
902             return -TARGET_EFAULT;
903         *fds_ptr = fds;
904     } else {
905         *fds_ptr = NULL;
906     }
907     return 0;
908 }
909 
910 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
911                                           const fd_set *fds,
912                                           int n)
913 {
914     int i, nw, j, k;
915     abi_long v;
916     abi_ulong *target_fds;
917 
918     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
919     if (!(target_fds = lock_user(VERIFY_WRITE,
920                                  target_fds_addr,
921                                  sizeof(abi_ulong) * nw,
922                                  0)))
923         return -TARGET_EFAULT;
924 
925     k = 0;
926     for (i = 0; i < nw; i++) {
927         v = 0;
928         for (j = 0; j < TARGET_ABI_BITS; j++) {
929             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
930             k++;
931         }
932         __put_user(v, &target_fds[i]);
933     }
934 
935     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
936 
937     return 0;
938 }
939 #endif
940 
941 #if defined(__alpha__)
942 #define HOST_HZ 1024
943 #else
944 #define HOST_HZ 100
945 #endif
946 
/*
 * Convert a clock tick count from host HZ to target HZ.
 * When the rates match this is the identity; otherwise scale in
 * 64-bit arithmetic so the multiply cannot overflow before dividing.
 */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
955 
/*
 * Copy a host struct rusage out to guest memory at @target_addr,
 * byte-swapping each field for the target ABI.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    /* Field-by-field copy; every member is an abi_long-sized scalar. */
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
985 
986 #ifdef TARGET_NR_setrlimit
987 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
988 {
989     abi_ulong target_rlim_swap;
990     rlim_t result;
991 
992     target_rlim_swap = tswapal(target_rlim);
993     if (target_rlim_swap == TARGET_RLIM_INFINITY)
994         return RLIM_INFINITY;
995 
996     result = target_rlim_swap;
997     if (target_rlim_swap != (rlim_t)result)
998         return RLIM_INFINITY;
999 
1000     return result;
1001 }
1002 #endif
1003 
1004 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1005 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1006 {
1007     abi_ulong target_rlim_swap;
1008     abi_ulong result;
1009 
1010     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1011         target_rlim_swap = TARGET_RLIM_INFINITY;
1012     else
1013         target_rlim_swap = rlim;
1014     result = tswapal(target_rlim_swap);
1015 
1016     return result;
1017 }
1018 #endif
1019 
/*
 * Translate a guest RLIMIT_* resource code to the host's numbering.
 * Unknown codes are passed through unchanged and left for the host
 * syscall to reject.
 */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    /* RLIMIT_RTTIME is not available on all host libcs. */
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1061 
1062 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1063                                               abi_ulong target_tv_addr)
1064 {
1065     struct target_timeval *target_tv;
1066 
1067     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1068         return -TARGET_EFAULT;
1069     }
1070 
1071     __get_user(tv->tv_sec, &target_tv->tv_sec);
1072     __get_user(tv->tv_usec, &target_tv->tv_usec);
1073 
1074     unlock_user_struct(target_tv, target_tv_addr, 0);
1075 
1076     return 0;
1077 }
1078 
1079 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1080                                             const struct timeval *tv)
1081 {
1082     struct target_timeval *target_tv;
1083 
1084     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1085         return -TARGET_EFAULT;
1086     }
1087 
1088     __put_user(tv->tv_sec, &target_tv->tv_sec);
1089     __put_user(tv->tv_usec, &target_tv->tv_usec);
1090 
1091     unlock_user_struct(target_tv, target_tv_addr, 1);
1092 
1093     return 0;
1094 }
1095 
1096 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1097 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1098                                                 abi_ulong target_tv_addr)
1099 {
1100     struct target__kernel_sock_timeval *target_tv;
1101 
1102     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1103         return -TARGET_EFAULT;
1104     }
1105 
1106     __get_user(tv->tv_sec, &target_tv->tv_sec);
1107     __get_user(tv->tv_usec, &target_tv->tv_usec);
1108 
1109     unlock_user_struct(target_tv, target_tv_addr, 0);
1110 
1111     return 0;
1112 }
1113 #endif
1114 
1115 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1116                                               const struct timeval *tv)
1117 {
1118     struct target__kernel_sock_timeval *target_tv;
1119 
1120     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1121         return -TARGET_EFAULT;
1122     }
1123 
1124     __put_user(tv->tv_sec, &target_tv->tv_sec);
1125     __put_user(tv->tv_usec, &target_tv->tv_usec);
1126 
1127     unlock_user_struct(target_tv, target_tv_addr, 1);
1128 
1129     return 0;
1130 }
1131 
1132 #if defined(TARGET_NR_futex) || \
1133     defined(TARGET_NR_rt_sigtimedwait) || \
1134     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
1135     defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
1136     defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
1137     defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
1138     defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
1139     defined(TARGET_NR_timer_settime) || \
1140     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
1141 static inline abi_long target_to_host_timespec(struct timespec *host_ts,
1142                                                abi_ulong target_addr)
1143 {
1144     struct target_timespec *target_ts;
1145 
1146     if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
1147         return -TARGET_EFAULT;
1148     }
1149     __get_user(host_ts->tv_sec, &target_ts->tv_sec);
1150     __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1151     unlock_user_struct(target_ts, target_addr, 0);
1152     return 0;
1153 }
1154 #endif
1155 
1156 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1157     defined(TARGET_NR_timer_settime64) || \
1158     defined(TARGET_NR_mq_timedsend_time64) || \
1159     defined(TARGET_NR_mq_timedreceive_time64) || \
1160     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1161     defined(TARGET_NR_clock_nanosleep_time64) || \
1162     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1163     defined(TARGET_NR_utimensat) || \
1164     defined(TARGET_NR_utimensat_time64) || \
1165     defined(TARGET_NR_semtimedop_time64) || \
1166     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a 64-bit (__kernel_timespec layout) timespec from guest memory
 * into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /*
     * On 32-bit targets tv_nsec occupies only the low half of a 64-bit
     * slot; truncating through abi_long and sign-extending back to long
     * discards the (possibly uninitialised) padding bits.
     */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1182 #endif
1183 
1184 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1185                                                struct timespec *host_ts)
1186 {
1187     struct target_timespec *target_ts;
1188 
1189     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1190         return -TARGET_EFAULT;
1191     }
1192     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1193     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1194     unlock_user_struct(target_ts, target_addr, 1);
1195     return 0;
1196 }
1197 
1198 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1199                                                  struct timespec *host_ts)
1200 {
1201     struct target__kernel_timespec *target_ts;
1202 
1203     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1204         return -TARGET_EFAULT;
1205     }
1206     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1207     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1208     unlock_user_struct(target_ts, target_addr, 1);
1209     return 0;
1210 }
1211 
1212 #if defined(TARGET_NR_gettimeofday)
/*
 * Write a host struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /*
     * NOTE(review): the copy-on-lock flag is 1 here even though we only
     * write, unlike the other copy_to_user_* helpers which pass 0.
     * Appears harmless (the contents are fully overwritten) — confirm
     * before changing.
     */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
1229 #endif
1230 
1231 #if defined(TARGET_NR_settimeofday)
1232 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1233                                                abi_ulong target_tz_addr)
1234 {
1235     struct target_timezone *target_tz;
1236 
1237     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1238         return -TARGET_EFAULT;
1239     }
1240 
1241     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1242     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1243 
1244     unlock_user_struct(target_tz, target_tz_addr, 0);
1245 
1246     return 0;
1247 }
1248 #endif
1249 
1250 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1251 #include <mqueue.h>
1252 
1253 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1254                                               abi_ulong target_mq_attr_addr)
1255 {
1256     struct target_mq_attr *target_mq_attr;
1257 
1258     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1259                           target_mq_attr_addr, 1))
1260         return -TARGET_EFAULT;
1261 
1262     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1263     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1264     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1265     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1266 
1267     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1268 
1269     return 0;
1270 }
1271 
1272 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1273                                             const struct mq_attr *attr)
1274 {
1275     struct target_mq_attr *target_mq_attr;
1276 
1277     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1278                           target_mq_attr_addr, 0))
1279         return -TARGET_EFAULT;
1280 
1281     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1282     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1283     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1284     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1285 
1286     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1287 
1288     return 0;
1289 }
1290 #endif
1291 
1292 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/*
 * Emulate select(2) for the guest.
 *
 * do_select() must return target values and target errnos.  The guest
 * fd_sets and timeval are converted in, the host syscall is issued via
 * safe_pselect6() (so the timeout can be expressed as a timespec), and
 * on success the possibly-modified sets and remaining timeout are
 * copied back out, matching Linux select semantics.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* NULL guest pointers become NULL set pointers for the host call. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select's timeval must become a timespec for pselect6. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the ready sets and the updated (remaining) timeout back. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1349 
1350 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1351 static abi_long do_old_select(abi_ulong arg1)
1352 {
1353     struct target_sel_arg_struct *sel;
1354     abi_ulong inp, outp, exp, tvp;
1355     long nsel;
1356 
1357     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1358         return -TARGET_EFAULT;
1359     }
1360 
1361     nsel = tswapal(sel->n);
1362     inp = tswapal(sel->inp);
1363     outp = tswapal(sel->outp);
1364     exp = tswapal(sel->exp);
1365     tvp = tswapal(sel->tvp);
1366 
1367     unlock_user_struct(sel, arg1, 0);
1368 
1369     return do_select(nsel, inp, outp, exp, tvp);
1370 }
1371 #endif
1372 #endif
1373 
1374 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6(2)/pselect6_time64(2) for the guest.
 *
 * @time64 selects the 64-bit timespec layout for arg5.  Returns target
 * values and target errnos.  The fd_sets, timeout and the packed
 * sigset argument are converted in, safe_pselect6() is issued with the
 * guest's signal mask temporarily installed, and on success the sets
 * and remaining timeout are copied back out.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* NULL guest pointers become NULL set pointers for the host call. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* Installs the guest mask; must be undone after the call. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        /* Restore the original signal mask / deliver pending signals. */
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        /* Copy the ready sets and the remaining timeout back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1484 #endif
1485 
1486 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1487     defined(TARGET_NR_ppoll_time64)
/*
 * Emulate poll(2)/ppoll(2)/ppoll_time64(2) for the guest.
 *
 * @ppoll selects ppoll semantics (arg3 = timespec addr, arg4/arg5 =
 * sigset addr/size); otherwise poll semantics (arg3 = timeout in ms).
 * @time64 selects the 64-bit timespec layout.  Returns target values
 * and target errnos.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Reject sizes that would overflow the lock_user length. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        /* Build the host pollfd array from the guest one. */
        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Install the guest mask; undone via finish_sigsuspend_mask. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        /* ppoll updates the timespec with the remaining time. */
        if (!is_error(ret) && arg3) {
            /*
             * NOTE(review): these EFAULT paths return without the
             * unlock_user(target_pfd, ...) done on the earlier error
             * paths — confirm whether that is intentional.
             */
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
          struct timespec ts, *pts;

          if (arg3 >= 0) {
              /* Convert ms to secs, ns */
              ts.tv_sec = arg3 / 1000;
              ts.tv_nsec = (arg3 % 1000) * 1000000LL;
              pts = &ts;
          } else {
              /* -ve poll() timeout means "infinite" */
              pts = NULL;
          }
          ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy the revents results back to the guest array. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
1583 #endif
1584 
/*
 * Emulate pipe(2)/pipe2(2) for the guest.
 *
 * @is_pipe2 distinguishes the two entry points: several targets return
 * the original pipe's two descriptors in registers rather than through
 * the pipedes array, and that convention does not apply to pipe2.
 * Returns 0 (or the read fd, for the register-convention targets) on
 * success, a target errno on failure.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd goes in a4; the read fd is the return value. */
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    /* Default convention: store both fds into the guest array. */
    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1618 
1619 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1620                                               abi_ulong target_addr,
1621                                               socklen_t len)
1622 {
1623     struct target_ip_mreqn *target_smreqn;
1624 
1625     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1626     if (!target_smreqn)
1627         return -TARGET_EFAULT;
1628     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1629     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1630     if (len == sizeof(struct target_ip_mreqn))
1631         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1632     unlock_user(target_smreqn, target_addr, 0);
1633 
1634     return 0;
1635 }
1636 
/*
 * Read a guest sockaddr of @len bytes into the host buffer *addr,
 * fixing up byte order for the address-family-specific fields.
 * An fd-specific translator (e.g. for netlink sockets) takes
 * precedence when registered.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /*
             * NOTE(review): cp[len] reads one byte past the locked
             * length; presumably safe because lock_user maps whole
             * guest pages — confirm.
             */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    /* Per-family fix-up of multi-byte fields copied above. */
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
	struct target_sockaddr_ll *lladdr;

	lladdr = (struct target_sockaddr_ll *)addr;
	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1698 
1699 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1700                                                struct sockaddr *addr,
1701                                                socklen_t len)
1702 {
1703     struct target_sockaddr *target_saddr;
1704 
1705     if (len == 0) {
1706         return 0;
1707     }
1708     assert(addr);
1709 
1710     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1711     if (!target_saddr)
1712         return -TARGET_EFAULT;
1713     memcpy(target_saddr, addr, len);
1714     if (len >= offsetof(struct target_sockaddr, sa_family) +
1715         sizeof(target_saddr->sa_family)) {
1716         target_saddr->sa_family = tswap16(addr->sa_family);
1717     }
1718     if (addr->sa_family == AF_NETLINK &&
1719         len >= sizeof(struct target_sockaddr_nl)) {
1720         struct target_sockaddr_nl *target_nl =
1721                (struct target_sockaddr_nl *)target_saddr;
1722         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1723         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1724     } else if (addr->sa_family == AF_PACKET) {
1725         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1726         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1727         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1728     } else if (addr->sa_family == AF_INET6 &&
1729                len >= sizeof(struct target_sockaddr_in6)) {
1730         struct target_sockaddr_in6 *target_in6 =
1731                (struct target_sockaddr_in6 *)target_saddr;
1732         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1733     }
1734     unlock_user(target_saddr, target_addr, len);
1735 
1736     return 0;
1737 }
1738 
/*
 * Convert guest-format ancillary data (control messages) described by
 * @target_msgh into host format, filling the host control buffer that
 * the caller has already set up in @msgh (msg_control/msg_controllen).
 *
 * Returns 0 on success (including the no-control-data case), or
 * -TARGET_EFAULT if the guest control buffer cannot be locked.
 * On return msgh->msg_controllen is set to the number of host control
 * bytes actually produced.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* No room for even one guest header means no control data at all. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length of this guest cmsg, header excluded. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* Translate level/type from guest to host values. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* fd-passing: byte-swap each descriptor individually. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload type: raw copy, best effort (no swapping). */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    /* Read-only lock: nothing to copy back to guest memory. */
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1832 
/*
 * Convert host-format ancillary data attached to @msgh into guest
 * (target) format, writing it into the guest control buffer described
 * by @target_msgh (msg_control/msg_controllen, guest byte order).
 *
 * Returns 0 on success, or -TARGET_EFAULT if the guest control buffer
 * cannot be locked.  On return target_msgh->msg_controllen holds the
 * number of bytes actually written, and MSG_CTRUNC is set in
 * target_msgh->msg_flags if the guest buffer was too small.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* Guest buffer too small for even one header: report zero bytes. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length of this cmsg, header excluded. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* Translate level/type from host to guest values. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Clamp the payload to the space left in the guest buffer,
         * flagging the truncation to the guest.
         */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* fd-passing: byte-swap each descriptor individually. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Mirrors the kernel's layout: extended error followed
                 * by the offending peer's address.
                 */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Best effort: raw copy, zero-padding any extra guest space. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    /* Copy the 'space' bytes we produced back to guest memory. */
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2064 
2065 /* do_setsockopt() Must return target values and target errnos. */
2066 static abi_long do_setsockopt(int sockfd, int level, int optname,
2067                               abi_ulong optval_addr, socklen_t optlen)
2068 {
2069     abi_long ret;
2070     int val;
2071     struct ip_mreqn *ip_mreq;
2072     struct ip_mreq_source *ip_mreq_source;
2073 
2074     switch(level) {
2075     case SOL_TCP:
2076     case SOL_UDP:
2077         /* TCP and UDP options all take an 'int' value.  */
2078         if (optlen < sizeof(uint32_t))
2079             return -TARGET_EINVAL;
2080 
2081         if (get_user_u32(val, optval_addr))
2082             return -TARGET_EFAULT;
2083         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2084         break;
2085     case SOL_IP:
2086         switch(optname) {
2087         case IP_TOS:
2088         case IP_TTL:
2089         case IP_HDRINCL:
2090         case IP_ROUTER_ALERT:
2091         case IP_RECVOPTS:
2092         case IP_RETOPTS:
2093         case IP_PKTINFO:
2094         case IP_MTU_DISCOVER:
2095         case IP_RECVERR:
2096         case IP_RECVTTL:
2097         case IP_RECVTOS:
2098 #ifdef IP_FREEBIND
2099         case IP_FREEBIND:
2100 #endif
2101         case IP_MULTICAST_TTL:
2102         case IP_MULTICAST_LOOP:
2103             val = 0;
2104             if (optlen >= sizeof(uint32_t)) {
2105                 if (get_user_u32(val, optval_addr))
2106                     return -TARGET_EFAULT;
2107             } else if (optlen >= 1) {
2108                 if (get_user_u8(val, optval_addr))
2109                     return -TARGET_EFAULT;
2110             }
2111             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2112             break;
2113         case IP_ADD_MEMBERSHIP:
2114         case IP_DROP_MEMBERSHIP:
2115             if (optlen < sizeof (struct target_ip_mreq) ||
2116                 optlen > sizeof (struct target_ip_mreqn))
2117                 return -TARGET_EINVAL;
2118 
2119             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2120             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2121             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2122             break;
2123 
2124         case IP_BLOCK_SOURCE:
2125         case IP_UNBLOCK_SOURCE:
2126         case IP_ADD_SOURCE_MEMBERSHIP:
2127         case IP_DROP_SOURCE_MEMBERSHIP:
2128             if (optlen != sizeof (struct target_ip_mreq_source))
2129                 return -TARGET_EINVAL;
2130 
2131             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2132             if (!ip_mreq_source) {
2133                 return -TARGET_EFAULT;
2134             }
2135             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2136             unlock_user (ip_mreq_source, optval_addr, 0);
2137             break;
2138 
2139         default:
2140             goto unimplemented;
2141         }
2142         break;
2143     case SOL_IPV6:
2144         switch (optname) {
2145         case IPV6_MTU_DISCOVER:
2146         case IPV6_MTU:
2147         case IPV6_V6ONLY:
2148         case IPV6_RECVPKTINFO:
2149         case IPV6_UNICAST_HOPS:
2150         case IPV6_MULTICAST_HOPS:
2151         case IPV6_MULTICAST_LOOP:
2152         case IPV6_RECVERR:
2153         case IPV6_RECVHOPLIMIT:
2154         case IPV6_2292HOPLIMIT:
2155         case IPV6_CHECKSUM:
2156         case IPV6_ADDRFORM:
2157         case IPV6_2292PKTINFO:
2158         case IPV6_RECVTCLASS:
2159         case IPV6_RECVRTHDR:
2160         case IPV6_2292RTHDR:
2161         case IPV6_RECVHOPOPTS:
2162         case IPV6_2292HOPOPTS:
2163         case IPV6_RECVDSTOPTS:
2164         case IPV6_2292DSTOPTS:
2165         case IPV6_TCLASS:
2166         case IPV6_ADDR_PREFERENCES:
2167 #ifdef IPV6_RECVPATHMTU
2168         case IPV6_RECVPATHMTU:
2169 #endif
2170 #ifdef IPV6_TRANSPARENT
2171         case IPV6_TRANSPARENT:
2172 #endif
2173 #ifdef IPV6_FREEBIND
2174         case IPV6_FREEBIND:
2175 #endif
2176 #ifdef IPV6_RECVORIGDSTADDR
2177         case IPV6_RECVORIGDSTADDR:
2178 #endif
2179             val = 0;
2180             if (optlen < sizeof(uint32_t)) {
2181                 return -TARGET_EINVAL;
2182             }
2183             if (get_user_u32(val, optval_addr)) {
2184                 return -TARGET_EFAULT;
2185             }
2186             ret = get_errno(setsockopt(sockfd, level, optname,
2187                                        &val, sizeof(val)));
2188             break;
2189         case IPV6_PKTINFO:
2190         {
2191             struct in6_pktinfo pki;
2192 
2193             if (optlen < sizeof(pki)) {
2194                 return -TARGET_EINVAL;
2195             }
2196 
2197             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2198                 return -TARGET_EFAULT;
2199             }
2200 
2201             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2202 
2203             ret = get_errno(setsockopt(sockfd, level, optname,
2204                                        &pki, sizeof(pki)));
2205             break;
2206         }
2207         case IPV6_ADD_MEMBERSHIP:
2208         case IPV6_DROP_MEMBERSHIP:
2209         {
2210             struct ipv6_mreq ipv6mreq;
2211 
2212             if (optlen < sizeof(ipv6mreq)) {
2213                 return -TARGET_EINVAL;
2214             }
2215 
2216             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2217                 return -TARGET_EFAULT;
2218             }
2219 
2220             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2221 
2222             ret = get_errno(setsockopt(sockfd, level, optname,
2223                                        &ipv6mreq, sizeof(ipv6mreq)));
2224             break;
2225         }
2226         default:
2227             goto unimplemented;
2228         }
2229         break;
2230     case SOL_ICMPV6:
2231         switch (optname) {
2232         case ICMPV6_FILTER:
2233         {
2234             struct icmp6_filter icmp6f;
2235 
2236             if (optlen > sizeof(icmp6f)) {
2237                 optlen = sizeof(icmp6f);
2238             }
2239 
2240             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2241                 return -TARGET_EFAULT;
2242             }
2243 
2244             for (val = 0; val < 8; val++) {
2245                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2246             }
2247 
2248             ret = get_errno(setsockopt(sockfd, level, optname,
2249                                        &icmp6f, optlen));
2250             break;
2251         }
2252         default:
2253             goto unimplemented;
2254         }
2255         break;
2256     case SOL_RAW:
2257         switch (optname) {
2258         case ICMP_FILTER:
2259         case IPV6_CHECKSUM:
2260             /* those take an u32 value */
2261             if (optlen < sizeof(uint32_t)) {
2262                 return -TARGET_EINVAL;
2263             }
2264 
2265             if (get_user_u32(val, optval_addr)) {
2266                 return -TARGET_EFAULT;
2267             }
2268             ret = get_errno(setsockopt(sockfd, level, optname,
2269                                        &val, sizeof(val)));
2270             break;
2271 
2272         default:
2273             goto unimplemented;
2274         }
2275         break;
2276 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2277     case SOL_ALG:
2278         switch (optname) {
2279         case ALG_SET_KEY:
2280         {
2281             char *alg_key = g_malloc(optlen);
2282 
2283             if (!alg_key) {
2284                 return -TARGET_ENOMEM;
2285             }
2286             if (copy_from_user(alg_key, optval_addr, optlen)) {
2287                 g_free(alg_key);
2288                 return -TARGET_EFAULT;
2289             }
2290             ret = get_errno(setsockopt(sockfd, level, optname,
2291                                        alg_key, optlen));
2292             g_free(alg_key);
2293             break;
2294         }
2295         case ALG_SET_AEAD_AUTHSIZE:
2296         {
2297             ret = get_errno(setsockopt(sockfd, level, optname,
2298                                        NULL, optlen));
2299             break;
2300         }
2301         default:
2302             goto unimplemented;
2303         }
2304         break;
2305 #endif
2306     case TARGET_SOL_SOCKET:
2307         switch (optname) {
2308         case TARGET_SO_RCVTIMEO:
2309         {
2310                 struct timeval tv;
2311 
2312                 optname = SO_RCVTIMEO;
2313 
2314 set_timeout:
2315                 if (optlen != sizeof(struct target_timeval)) {
2316                     return -TARGET_EINVAL;
2317                 }
2318 
2319                 if (copy_from_user_timeval(&tv, optval_addr)) {
2320                     return -TARGET_EFAULT;
2321                 }
2322 
2323                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2324                                 &tv, sizeof(tv)));
2325                 return ret;
2326         }
2327         case TARGET_SO_SNDTIMEO:
2328                 optname = SO_SNDTIMEO;
2329                 goto set_timeout;
2330         case TARGET_SO_ATTACH_FILTER:
2331         {
2332                 struct target_sock_fprog *tfprog;
2333                 struct target_sock_filter *tfilter;
2334                 struct sock_fprog fprog;
2335                 struct sock_filter *filter;
2336                 int i;
2337 
2338                 if (optlen != sizeof(*tfprog)) {
2339                     return -TARGET_EINVAL;
2340                 }
2341                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2342                     return -TARGET_EFAULT;
2343                 }
2344                 if (!lock_user_struct(VERIFY_READ, tfilter,
2345                                       tswapal(tfprog->filter), 0)) {
2346                     unlock_user_struct(tfprog, optval_addr, 1);
2347                     return -TARGET_EFAULT;
2348                 }
2349 
2350                 fprog.len = tswap16(tfprog->len);
2351                 filter = g_try_new(struct sock_filter, fprog.len);
2352                 if (filter == NULL) {
2353                     unlock_user_struct(tfilter, tfprog->filter, 1);
2354                     unlock_user_struct(tfprog, optval_addr, 1);
2355                     return -TARGET_ENOMEM;
2356                 }
2357                 for (i = 0; i < fprog.len; i++) {
2358                     filter[i].code = tswap16(tfilter[i].code);
2359                     filter[i].jt = tfilter[i].jt;
2360                     filter[i].jf = tfilter[i].jf;
2361                     filter[i].k = tswap32(tfilter[i].k);
2362                 }
2363                 fprog.filter = filter;
2364 
2365                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2366                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2367                 g_free(filter);
2368 
2369                 unlock_user_struct(tfilter, tfprog->filter, 1);
2370                 unlock_user_struct(tfprog, optval_addr, 1);
2371                 return ret;
2372         }
2373 	case TARGET_SO_BINDTODEVICE:
2374 	{
2375 		char *dev_ifname, *addr_ifname;
2376 
2377 		if (optlen > IFNAMSIZ - 1) {
2378 		    optlen = IFNAMSIZ - 1;
2379 		}
2380 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2381 		if (!dev_ifname) {
2382 		    return -TARGET_EFAULT;
2383 		}
2384 		optname = SO_BINDTODEVICE;
2385 		addr_ifname = alloca(IFNAMSIZ);
2386 		memcpy(addr_ifname, dev_ifname, optlen);
2387 		addr_ifname[optlen] = 0;
2388 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2389                                            addr_ifname, optlen));
2390 		unlock_user (dev_ifname, optval_addr, 0);
2391 		return ret;
2392 	}
2393         case TARGET_SO_LINGER:
2394         {
2395                 struct linger lg;
2396                 struct target_linger *tlg;
2397 
2398                 if (optlen != sizeof(struct target_linger)) {
2399                     return -TARGET_EINVAL;
2400                 }
2401                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2402                     return -TARGET_EFAULT;
2403                 }
2404                 __get_user(lg.l_onoff, &tlg->l_onoff);
2405                 __get_user(lg.l_linger, &tlg->l_linger);
2406                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2407                                 &lg, sizeof(lg)));
2408                 unlock_user_struct(tlg, optval_addr, 0);
2409                 return ret;
2410         }
2411             /* Options with 'int' argument.  */
2412         case TARGET_SO_DEBUG:
2413 		optname = SO_DEBUG;
2414 		break;
2415         case TARGET_SO_REUSEADDR:
2416 		optname = SO_REUSEADDR;
2417 		break;
2418 #ifdef SO_REUSEPORT
2419         case TARGET_SO_REUSEPORT:
2420                 optname = SO_REUSEPORT;
2421                 break;
2422 #endif
2423         case TARGET_SO_TYPE:
2424 		optname = SO_TYPE;
2425 		break;
2426         case TARGET_SO_ERROR:
2427 		optname = SO_ERROR;
2428 		break;
2429         case TARGET_SO_DONTROUTE:
2430 		optname = SO_DONTROUTE;
2431 		break;
2432         case TARGET_SO_BROADCAST:
2433 		optname = SO_BROADCAST;
2434 		break;
2435         case TARGET_SO_SNDBUF:
2436 		optname = SO_SNDBUF;
2437 		break;
2438         case TARGET_SO_SNDBUFFORCE:
2439                 optname = SO_SNDBUFFORCE;
2440                 break;
2441         case TARGET_SO_RCVBUF:
2442 		optname = SO_RCVBUF;
2443 		break;
2444         case TARGET_SO_RCVBUFFORCE:
2445                 optname = SO_RCVBUFFORCE;
2446                 break;
2447         case TARGET_SO_KEEPALIVE:
2448 		optname = SO_KEEPALIVE;
2449 		break;
2450         case TARGET_SO_OOBINLINE:
2451 		optname = SO_OOBINLINE;
2452 		break;
2453         case TARGET_SO_NO_CHECK:
2454 		optname = SO_NO_CHECK;
2455 		break;
2456         case TARGET_SO_PRIORITY:
2457 		optname = SO_PRIORITY;
2458 		break;
2459 #ifdef SO_BSDCOMPAT
2460         case TARGET_SO_BSDCOMPAT:
2461 		optname = SO_BSDCOMPAT;
2462 		break;
2463 #endif
2464         case TARGET_SO_PASSCRED:
2465 		optname = SO_PASSCRED;
2466 		break;
2467         case TARGET_SO_PASSSEC:
2468                 optname = SO_PASSSEC;
2469                 break;
2470         case TARGET_SO_TIMESTAMP:
2471 		optname = SO_TIMESTAMP;
2472 		break;
2473         case TARGET_SO_RCVLOWAT:
2474 		optname = SO_RCVLOWAT;
2475 		break;
2476         default:
2477             goto unimplemented;
2478         }
2479 	if (optlen < sizeof(uint32_t))
2480             return -TARGET_EINVAL;
2481 
2482 	if (get_user_u32(val, optval_addr))
2483             return -TARGET_EFAULT;
2484 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2485         break;
2486 #ifdef SOL_NETLINK
2487     case SOL_NETLINK:
2488         switch (optname) {
2489         case NETLINK_PKTINFO:
2490         case NETLINK_ADD_MEMBERSHIP:
2491         case NETLINK_DROP_MEMBERSHIP:
2492         case NETLINK_BROADCAST_ERROR:
2493         case NETLINK_NO_ENOBUFS:
2494 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2495         case NETLINK_LISTEN_ALL_NSID:
2496         case NETLINK_CAP_ACK:
2497 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2498 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2499         case NETLINK_EXT_ACK:
2500 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2501 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2502         case NETLINK_GET_STRICT_CHK:
2503 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2504             break;
2505         default:
2506             goto unimplemented;
2507         }
2508         val = 0;
2509         if (optlen < sizeof(uint32_t)) {
2510             return -TARGET_EINVAL;
2511         }
2512         if (get_user_u32(val, optval_addr)) {
2513             return -TARGET_EFAULT;
2514         }
2515         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2516                                    sizeof(val)));
2517         break;
2518 #endif /* SOL_NETLINK */
2519     default:
2520     unimplemented:
2521         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2522                       level, optname);
2523         ret = -TARGET_ENOPROTOOPT;
2524     }
2525     return ret;
2526 }
2527 
2528 /* do_getsockopt() Must return target values and target errnos. */
2529 static abi_long do_getsockopt(int sockfd, int level, int optname,
2530                               abi_ulong optval_addr, abi_ulong optlen)
2531 {
2532     abi_long ret;
2533     int len, val;
2534     socklen_t lv;
2535 
2536     switch(level) {
2537     case TARGET_SOL_SOCKET:
2538         level = SOL_SOCKET;
2539         switch (optname) {
2540         /* These don't just return a single integer */
2541         case TARGET_SO_PEERNAME:
2542             goto unimplemented;
2543         case TARGET_SO_RCVTIMEO: {
2544             struct timeval tv;
2545             socklen_t tvlen;
2546 
2547             optname = SO_RCVTIMEO;
2548 
2549 get_timeout:
2550             if (get_user_u32(len, optlen)) {
2551                 return -TARGET_EFAULT;
2552             }
2553             if (len < 0) {
2554                 return -TARGET_EINVAL;
2555             }
2556 
2557             tvlen = sizeof(tv);
2558             ret = get_errno(getsockopt(sockfd, level, optname,
2559                                        &tv, &tvlen));
2560             if (ret < 0) {
2561                 return ret;
2562             }
2563             if (len > sizeof(struct target_timeval)) {
2564                 len = sizeof(struct target_timeval);
2565             }
2566             if (copy_to_user_timeval(optval_addr, &tv)) {
2567                 return -TARGET_EFAULT;
2568             }
2569             if (put_user_u32(len, optlen)) {
2570                 return -TARGET_EFAULT;
2571             }
2572             break;
2573         }
2574         case TARGET_SO_SNDTIMEO:
2575             optname = SO_SNDTIMEO;
2576             goto get_timeout;
2577         case TARGET_SO_PEERCRED: {
2578             struct ucred cr;
2579             socklen_t crlen;
2580             struct target_ucred *tcr;
2581 
2582             if (get_user_u32(len, optlen)) {
2583                 return -TARGET_EFAULT;
2584             }
2585             if (len < 0) {
2586                 return -TARGET_EINVAL;
2587             }
2588 
2589             crlen = sizeof(cr);
2590             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2591                                        &cr, &crlen));
2592             if (ret < 0) {
2593                 return ret;
2594             }
2595             if (len > crlen) {
2596                 len = crlen;
2597             }
2598             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2599                 return -TARGET_EFAULT;
2600             }
2601             __put_user(cr.pid, &tcr->pid);
2602             __put_user(cr.uid, &tcr->uid);
2603             __put_user(cr.gid, &tcr->gid);
2604             unlock_user_struct(tcr, optval_addr, 1);
2605             if (put_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             break;
2609         }
2610         case TARGET_SO_PEERSEC: {
2611             char *name;
2612 
2613             if (get_user_u32(len, optlen)) {
2614                 return -TARGET_EFAULT;
2615             }
2616             if (len < 0) {
2617                 return -TARGET_EINVAL;
2618             }
2619             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2620             if (!name) {
2621                 return -TARGET_EFAULT;
2622             }
2623             lv = len;
2624             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2625                                        name, &lv));
2626             if (put_user_u32(lv, optlen)) {
2627                 ret = -TARGET_EFAULT;
2628             }
2629             unlock_user(name, optval_addr, lv);
2630             break;
2631         }
2632         case TARGET_SO_LINGER:
2633         {
2634             struct linger lg;
2635             socklen_t lglen;
2636             struct target_linger *tlg;
2637 
2638             if (get_user_u32(len, optlen)) {
2639                 return -TARGET_EFAULT;
2640             }
2641             if (len < 0) {
2642                 return -TARGET_EINVAL;
2643             }
2644 
2645             lglen = sizeof(lg);
2646             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2647                                        &lg, &lglen));
2648             if (ret < 0) {
2649                 return ret;
2650             }
2651             if (len > lglen) {
2652                 len = lglen;
2653             }
2654             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2655                 return -TARGET_EFAULT;
2656             }
2657             __put_user(lg.l_onoff, &tlg->l_onoff);
2658             __put_user(lg.l_linger, &tlg->l_linger);
2659             unlock_user_struct(tlg, optval_addr, 1);
2660             if (put_user_u32(len, optlen)) {
2661                 return -TARGET_EFAULT;
2662             }
2663             break;
2664         }
2665         /* Options with 'int' argument.  */
2666         case TARGET_SO_DEBUG:
2667             optname = SO_DEBUG;
2668             goto int_case;
2669         case TARGET_SO_REUSEADDR:
2670             optname = SO_REUSEADDR;
2671             goto int_case;
2672 #ifdef SO_REUSEPORT
2673         case TARGET_SO_REUSEPORT:
2674             optname = SO_REUSEPORT;
2675             goto int_case;
2676 #endif
2677         case TARGET_SO_TYPE:
2678             optname = SO_TYPE;
2679             goto int_case;
2680         case TARGET_SO_ERROR:
2681             optname = SO_ERROR;
2682             goto int_case;
2683         case TARGET_SO_DONTROUTE:
2684             optname = SO_DONTROUTE;
2685             goto int_case;
2686         case TARGET_SO_BROADCAST:
2687             optname = SO_BROADCAST;
2688             goto int_case;
2689         case TARGET_SO_SNDBUF:
2690             optname = SO_SNDBUF;
2691             goto int_case;
2692         case TARGET_SO_RCVBUF:
2693             optname = SO_RCVBUF;
2694             goto int_case;
2695         case TARGET_SO_KEEPALIVE:
2696             optname = SO_KEEPALIVE;
2697             goto int_case;
2698         case TARGET_SO_OOBINLINE:
2699             optname = SO_OOBINLINE;
2700             goto int_case;
2701         case TARGET_SO_NO_CHECK:
2702             optname = SO_NO_CHECK;
2703             goto int_case;
2704         case TARGET_SO_PRIORITY:
2705             optname = SO_PRIORITY;
2706             goto int_case;
2707 #ifdef SO_BSDCOMPAT
2708         case TARGET_SO_BSDCOMPAT:
2709             optname = SO_BSDCOMPAT;
2710             goto int_case;
2711 #endif
2712         case TARGET_SO_PASSCRED:
2713             optname = SO_PASSCRED;
2714             goto int_case;
2715         case TARGET_SO_TIMESTAMP:
2716             optname = SO_TIMESTAMP;
2717             goto int_case;
2718         case TARGET_SO_RCVLOWAT:
2719             optname = SO_RCVLOWAT;
2720             goto int_case;
2721         case TARGET_SO_ACCEPTCONN:
2722             optname = SO_ACCEPTCONN;
2723             goto int_case;
2724         case TARGET_SO_PROTOCOL:
2725             optname = SO_PROTOCOL;
2726             goto int_case;
2727         case TARGET_SO_DOMAIN:
2728             optname = SO_DOMAIN;
2729             goto int_case;
2730         default:
2731             goto int_case;
2732         }
2733         break;
2734     case SOL_TCP:
2735     case SOL_UDP:
2736         /* TCP and UDP options all take an 'int' value.  */
2737     int_case:
2738         if (get_user_u32(len, optlen))
2739             return -TARGET_EFAULT;
2740         if (len < 0)
2741             return -TARGET_EINVAL;
2742         lv = sizeof(lv);
2743         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2744         if (ret < 0)
2745             return ret;
2746         switch (optname) {
2747         case SO_TYPE:
2748             val = host_to_target_sock_type(val);
2749             break;
2750         case SO_ERROR:
2751             val = host_to_target_errno(val);
2752             break;
2753         }
2754         if (len > lv)
2755             len = lv;
2756         if (len == 4) {
2757             if (put_user_u32(val, optval_addr))
2758                 return -TARGET_EFAULT;
2759         } else {
2760             if (put_user_u8(val, optval_addr))
2761                 return -TARGET_EFAULT;
2762         }
2763         if (put_user_u32(len, optlen))
2764             return -TARGET_EFAULT;
2765         break;
2766     case SOL_IP:
2767         switch(optname) {
2768         case IP_TOS:
2769         case IP_TTL:
2770         case IP_HDRINCL:
2771         case IP_ROUTER_ALERT:
2772         case IP_RECVOPTS:
2773         case IP_RETOPTS:
2774         case IP_PKTINFO:
2775         case IP_MTU_DISCOVER:
2776         case IP_RECVERR:
2777         case IP_RECVTOS:
2778 #ifdef IP_FREEBIND
2779         case IP_FREEBIND:
2780 #endif
2781         case IP_MULTICAST_TTL:
2782         case IP_MULTICAST_LOOP:
2783             if (get_user_u32(len, optlen))
2784                 return -TARGET_EFAULT;
2785             if (len < 0)
2786                 return -TARGET_EINVAL;
2787             lv = sizeof(lv);
2788             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2789             if (ret < 0)
2790                 return ret;
2791             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2792                 len = 1;
2793                 if (put_user_u32(len, optlen)
2794                     || put_user_u8(val, optval_addr))
2795                     return -TARGET_EFAULT;
2796             } else {
2797                 if (len > sizeof(int))
2798                     len = sizeof(int);
2799                 if (put_user_u32(len, optlen)
2800                     || put_user_u32(val, optval_addr))
2801                     return -TARGET_EFAULT;
2802             }
2803             break;
2804         default:
2805             ret = -TARGET_ENOPROTOOPT;
2806             break;
2807         }
2808         break;
2809     case SOL_IPV6:
2810         switch (optname) {
2811         case IPV6_MTU_DISCOVER:
2812         case IPV6_MTU:
2813         case IPV6_V6ONLY:
2814         case IPV6_RECVPKTINFO:
2815         case IPV6_UNICAST_HOPS:
2816         case IPV6_MULTICAST_HOPS:
2817         case IPV6_MULTICAST_LOOP:
2818         case IPV6_RECVERR:
2819         case IPV6_RECVHOPLIMIT:
2820         case IPV6_2292HOPLIMIT:
2821         case IPV6_CHECKSUM:
2822         case IPV6_ADDRFORM:
2823         case IPV6_2292PKTINFO:
2824         case IPV6_RECVTCLASS:
2825         case IPV6_RECVRTHDR:
2826         case IPV6_2292RTHDR:
2827         case IPV6_RECVHOPOPTS:
2828         case IPV6_2292HOPOPTS:
2829         case IPV6_RECVDSTOPTS:
2830         case IPV6_2292DSTOPTS:
2831         case IPV6_TCLASS:
2832         case IPV6_ADDR_PREFERENCES:
2833 #ifdef IPV6_RECVPATHMTU
2834         case IPV6_RECVPATHMTU:
2835 #endif
2836 #ifdef IPV6_TRANSPARENT
2837         case IPV6_TRANSPARENT:
2838 #endif
2839 #ifdef IPV6_FREEBIND
2840         case IPV6_FREEBIND:
2841 #endif
2842 #ifdef IPV6_RECVORIGDSTADDR
2843         case IPV6_RECVORIGDSTADDR:
2844 #endif
2845             if (get_user_u32(len, optlen))
2846                 return -TARGET_EFAULT;
2847             if (len < 0)
2848                 return -TARGET_EINVAL;
2849             lv = sizeof(lv);
2850             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2851             if (ret < 0)
2852                 return ret;
2853             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2854                 len = 1;
2855                 if (put_user_u32(len, optlen)
2856                     || put_user_u8(val, optval_addr))
2857                     return -TARGET_EFAULT;
2858             } else {
2859                 if (len > sizeof(int))
2860                     len = sizeof(int);
2861                 if (put_user_u32(len, optlen)
2862                     || put_user_u32(val, optval_addr))
2863                     return -TARGET_EFAULT;
2864             }
2865             break;
2866         default:
2867             ret = -TARGET_ENOPROTOOPT;
2868             break;
2869         }
2870         break;
2871 #ifdef SOL_NETLINK
2872     case SOL_NETLINK:
2873         switch (optname) {
2874         case NETLINK_PKTINFO:
2875         case NETLINK_BROADCAST_ERROR:
2876         case NETLINK_NO_ENOBUFS:
2877 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2878         case NETLINK_LISTEN_ALL_NSID:
2879         case NETLINK_CAP_ACK:
2880 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2882         case NETLINK_EXT_ACK:
2883 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2884 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2885         case NETLINK_GET_STRICT_CHK:
2886 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2887             if (get_user_u32(len, optlen)) {
2888                 return -TARGET_EFAULT;
2889             }
2890             if (len != sizeof(val)) {
2891                 return -TARGET_EINVAL;
2892             }
2893             lv = len;
2894             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2895             if (ret < 0) {
2896                 return ret;
2897             }
2898             if (put_user_u32(lv, optlen)
2899                 || put_user_u32(val, optval_addr)) {
2900                 return -TARGET_EFAULT;
2901             }
2902             break;
2903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2904         case NETLINK_LIST_MEMBERSHIPS:
2905         {
2906             uint32_t *results;
2907             int i;
2908             if (get_user_u32(len, optlen)) {
2909                 return -TARGET_EFAULT;
2910             }
2911             if (len < 0) {
2912                 return -TARGET_EINVAL;
2913             }
2914             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2915             if (!results && len > 0) {
2916                 return -TARGET_EFAULT;
2917             }
2918             lv = len;
2919             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2920             if (ret < 0) {
2921                 unlock_user(results, optval_addr, 0);
2922                 return ret;
2923             }
2924             /* swap host endianess to target endianess. */
2925             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2926                 results[i] = tswap32(results[i]);
2927             }
2928             if (put_user_u32(lv, optlen)) {
2929                 return -TARGET_EFAULT;
2930             }
2931             unlock_user(results, optval_addr, 0);
2932             break;
2933         }
2934 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2935         default:
2936             goto unimplemented;
2937         }
2938         break;
2939 #endif /* SOL_NETLINK */
2940     default:
2941     unimplemented:
2942         qemu_log_mask(LOG_UNIMP,
2943                       "getsockopt level=%d optname=%d not yet supported\n",
2944                       level, optname);
2945         ret = -TARGET_EOPNOTSUPP;
2946         break;
2947     }
2948     return ret;
2949 }
2950 
2951 /* Convert target low/high pair representing file offset into the host
2952  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2953  * as the kernel doesn't handle them either.
2954  */
2955 static void target_to_host_low_high(abi_ulong tlow,
2956                                     abi_ulong thigh,
2957                                     unsigned long *hlow,
2958                                     unsigned long *hhigh)
2959 {
2960     uint64_t off = tlow |
2961         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2962         TARGET_LONG_BITS / 2;
2963 
2964     *hlow = off;
2965     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2966 }
2967 
/* Convert a guest iovec array at target_addr into a host struct iovec
 * array with each buffer locked into host memory.  Returns NULL with
 * errno set on failure (errno 0 means count was 0).  The caller must
 * release the result with unlock_iovec().
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    /* Lock the guest's own iovec array so we can read base/len pairs. */
    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp the running total so the host syscall never sees
             * more than max_len bytes across the whole vector.
             */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked before the failing entry.
     * Lengths come from the (still locked) guest iovec, matching what
     * was passed to lock_user above.
     */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3055 
3056 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3057                          abi_ulong count, int copy)
3058 {
3059     struct target_iovec *target_vec;
3060     int i;
3061 
3062     target_vec = lock_user(VERIFY_READ, target_addr,
3063                            count * sizeof(struct target_iovec), 1);
3064     if (target_vec) {
3065         for (i = 0; i < count; i++) {
3066             abi_ulong base = tswapal(target_vec[i].iov_base);
3067             abi_long len = tswapal(target_vec[i].iov_len);
3068             if (len < 0) {
3069                 break;
3070             }
3071             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3072         }
3073         unlock_user(target_vec, target_addr, 0);
3074     }
3075 
3076     g_free(vec);
3077 }
3078 
3079 static inline int target_to_host_sock_type(int *type)
3080 {
3081     int host_type = 0;
3082     int target_type = *type;
3083 
3084     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3085     case TARGET_SOCK_DGRAM:
3086         host_type = SOCK_DGRAM;
3087         break;
3088     case TARGET_SOCK_STREAM:
3089         host_type = SOCK_STREAM;
3090         break;
3091     default:
3092         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3093         break;
3094     }
3095     if (target_type & TARGET_SOCK_CLOEXEC) {
3096 #if defined(SOCK_CLOEXEC)
3097         host_type |= SOCK_CLOEXEC;
3098 #else
3099         return -TARGET_EINVAL;
3100 #endif
3101     }
3102     if (target_type & TARGET_SOCK_NONBLOCK) {
3103 #if defined(SOCK_NONBLOCK)
3104         host_type |= SOCK_NONBLOCK;
3105 #elif !defined(O_NONBLOCK)
3106         return -TARGET_EINVAL;
3107 #endif
3108     }
3109     *type = host_type;
3110     return 0;
3111 }
3112 
3113 /* Try to emulate socket type flags after socket creation.  */
3114 static int sock_flags_fixup(int fd, int target_type)
3115 {
3116 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3117     if (target_type & TARGET_SOCK_NONBLOCK) {
3118         int flags = fcntl(fd, F_GETFL);
3119         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3120             close(fd);
3121             return -TARGET_EINVAL;
3122         }
3123     }
3124 #endif
3125     return fd;
3126 }
3127 
3128 /* do_socket() Must return target values and target errnos. */
3129 static abi_long do_socket(int domain, int type, int protocol)
3130 {
3131     int target_type = type;
3132     int ret;
3133 
3134     ret = target_to_host_sock_type(&type);
3135     if (ret) {
3136         return ret;
3137     }
3138 
3139     if (domain == PF_NETLINK && !(
3140 #ifdef CONFIG_RTNETLINK
3141          protocol == NETLINK_ROUTE ||
3142 #endif
3143          protocol == NETLINK_KOBJECT_UEVENT ||
3144          protocol == NETLINK_AUDIT)) {
3145         return -TARGET_EPROTONOSUPPORT;
3146     }
3147 
3148     if (domain == AF_PACKET ||
3149         (domain == AF_INET && type == SOCK_PACKET)) {
3150         protocol = tswap16(protocol);
3151     }
3152 
3153     ret = get_errno(socket(domain, type, protocol));
3154     if (ret >= 0) {
3155         ret = sock_flags_fixup(ret, target_type);
3156         if (type == SOCK_PACKET) {
3157             /* Manage an obsolete case :
3158              * if socket type is SOCK_PACKET, bind by name
3159              */
3160             fd_trans_register(ret, &target_packet_trans);
3161         } else if (domain == PF_NETLINK) {
3162             switch (protocol) {
3163 #ifdef CONFIG_RTNETLINK
3164             case NETLINK_ROUTE:
3165                 fd_trans_register(ret, &target_netlink_route_trans);
3166                 break;
3167 #endif
3168             case NETLINK_KOBJECT_UEVENT:
3169                 /* nothing to do: messages are strings */
3170                 break;
3171             case NETLINK_AUDIT:
3172                 fd_trans_register(ret, &target_netlink_audit_trans);
3173                 break;
3174             default:
3175                 g_assert_not_reached();
3176             }
3177         }
3178     }
3179     return ret;
3180 }
3181 
3182 /* do_bind() Must return target values and target errnos. */
3183 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3184                         socklen_t addrlen)
3185 {
3186     void *addr;
3187     abi_long ret;
3188 
3189     if ((int)addrlen < 0) {
3190         return -TARGET_EINVAL;
3191     }
3192 
3193     addr = alloca(addrlen+1);
3194 
3195     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3196     if (ret)
3197         return ret;
3198 
3199     return get_errno(bind(sockfd, addr, addrlen));
3200 }
3201 
3202 /* do_connect() Must return target values and target errnos. */
3203 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3204                            socklen_t addrlen)
3205 {
3206     void *addr;
3207     abi_long ret;
3208 
3209     if ((int)addrlen < 0) {
3210         return -TARGET_EINVAL;
3211     }
3212 
3213     addr = alloca(addrlen+1);
3214 
3215     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3216     if (ret)
3217         return ret;
3218 
3219     return get_errno(safe_connect(sockfd, addr, addrlen));
3220 }
3221 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation: msgp is the guest msghdr, already
 * locked into host memory by the caller.  'send' selects direction.
 * On receive, name/flags/control results are written back into *msgp.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Host cmsgs can be larger than the target's, so over-allocate.
     * NOTE(review): msg_controllen is guest-controlled and this is an
     * unbounded alloca — a huge value could overflow the host stack.
     * Consider bounding or heap-allocating; confirm against upstream.
     */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* Translate the payload of the first iov entry through the
             * fd's registered data converter before sending.
             */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; ret is reused for error checks
             * by the conversion helpers below.
             */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3334 
3335 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3336                                int flags, int send)
3337 {
3338     abi_long ret;
3339     struct target_msghdr *msgp;
3340 
3341     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3342                           msgp,
3343                           target_msg,
3344                           send ? 1 : 0)) {
3345         return -TARGET_EFAULT;
3346     }
3347     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3348     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3349     return ret;
3350 }
3351 
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/* Emulate sendmmsg/recvmmsg by looping do_sendrecvmsg_locked() over the
 * guest's mmsghdr vector.  Returns the number of messages processed if
 * any succeeded, otherwise the error from the first failure.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* The kernel silently clamps the vector length the same way. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* Record the per-message byte count in the guest's mmsghdr. */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Only copy back the entries actually processed. */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3398 
3399 /* do_accept4() Must return target values and target errnos. */
3400 static abi_long do_accept4(int fd, abi_ulong target_addr,
3401                            abi_ulong target_addrlen_addr, int flags)
3402 {
3403     socklen_t addrlen, ret_addrlen;
3404     void *addr;
3405     abi_long ret;
3406     int host_flags;
3407 
3408     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3409         return -TARGET_EINVAL;
3410     }
3411 
3412     host_flags = 0;
3413     if (flags & TARGET_SOCK_NONBLOCK) {
3414         host_flags |= SOCK_NONBLOCK;
3415     }
3416     if (flags & TARGET_SOCK_CLOEXEC) {
3417         host_flags |= SOCK_CLOEXEC;
3418     }
3419 
3420     if (target_addr == 0) {
3421         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3422     }
3423 
3424     /* linux returns EFAULT if addrlen pointer is invalid */
3425     if (get_user_u32(addrlen, target_addrlen_addr))
3426         return -TARGET_EFAULT;
3427 
3428     if ((int)addrlen < 0) {
3429         return -TARGET_EINVAL;
3430     }
3431 
3432     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3433         return -TARGET_EFAULT;
3434     }
3435 
3436     addr = alloca(addrlen);
3437 
3438     ret_addrlen = addrlen;
3439     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3440     if (!is_error(ret)) {
3441         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3442         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3443             ret = -TARGET_EFAULT;
3444         }
3445     }
3446     return ret;
3447 }
3448 
3449 /* do_getpeername() Must return target values and target errnos. */
3450 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3451                                abi_ulong target_addrlen_addr)
3452 {
3453     socklen_t addrlen, ret_addrlen;
3454     void *addr;
3455     abi_long ret;
3456 
3457     if (get_user_u32(addrlen, target_addrlen_addr))
3458         return -TARGET_EFAULT;
3459 
3460     if ((int)addrlen < 0) {
3461         return -TARGET_EINVAL;
3462     }
3463 
3464     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3465         return -TARGET_EFAULT;
3466     }
3467 
3468     addr = alloca(addrlen);
3469 
3470     ret_addrlen = addrlen;
3471     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3472     if (!is_error(ret)) {
3473         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3474         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3475             ret = -TARGET_EFAULT;
3476         }
3477     }
3478     return ret;
3479 }
3480 
3481 /* do_getsockname() Must return target values and target errnos. */
3482 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3483                                abi_ulong target_addrlen_addr)
3484 {
3485     socklen_t addrlen, ret_addrlen;
3486     void *addr;
3487     abi_long ret;
3488 
3489     if (get_user_u32(addrlen, target_addrlen_addr))
3490         return -TARGET_EFAULT;
3491 
3492     if ((int)addrlen < 0) {
3493         return -TARGET_EINVAL;
3494     }
3495 
3496     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3497         return -TARGET_EFAULT;
3498     }
3499 
3500     addr = alloca(addrlen);
3501 
3502     ret_addrlen = addrlen;
3503     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3504     if (!is_error(ret)) {
3505         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3506         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3507             ret = -TARGET_EFAULT;
3508         }
3509     }
3510     return ret;
3511 }
3512 
3513 /* do_socketpair() Must return target values and target errnos. */
3514 static abi_long do_socketpair(int domain, int type, int protocol,
3515                               abi_ulong target_tab_addr)
3516 {
3517     int tab[2];
3518     abi_long ret;
3519 
3520     target_to_host_sock_type(&type);
3521 
3522     ret = get_errno(socketpair(domain, type, protocol, tab));
3523     if (!is_error(ret)) {
3524         if (put_user_s32(tab[0], target_tab_addr)
3525             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3526             ret = -TARGET_EFAULT;
3527     }
3528     return ret;
3529 }
3530 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL;  /* original mapping, saved while a translated copy is in use */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /*
         * This fd has a data translator (e.g. netlink byte-swapping).
         * Translate on a heap copy so the guest's buffer is untouched;
         * the original pointer is restored before unlock_user below.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 leaves room for the NUL terminator added for AF_UNIX paths
         * by target_to_host_sockaddr -- TODO confirm. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Free the translated copy and unlock the original mapping. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3574 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        /* NULL guest buffer: pass NULL through (valid for 0-length recv). */
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        /* Caller wants the source address; fetch the buffer length first. */
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* Translate received payload in place (at most what was read). */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most the guest's buffer size, then the length. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit 'len' bytes of the locked buffer to the guest. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error: unlock without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3635 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos. */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        /* Args live in guest memory at vptr as an abi_long array. */
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3728 
#define N_SHM_REGIONS	32

/* Fixed-size table tracking guest shmat() mappings (start/size per slot). */
static struct shm_region {
    abi_ulong start;
    abi_ulong size;
    bool in_use;
} shm_regions[N_SHM_REGIONS];
3736 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop() time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* 32-bit ABIs pad each time field */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3755 
/*
 * Convert a guest struct ipc_perm (embedded at the start of a guest
 * semid64_ds at target_addr) into the host representation.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unmapped.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* 'mode' is 32 bits wide on these targets, 16 bits elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
    /* Likewise '__seq' is 32 bits on PPC only. */
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3783 
/*
 * Convert a host struct ipc_perm into the guest representation embedded
 * at the start of the guest semid64_ds at target_addr.
 * Returns 0 on success, -TARGET_EFAULT if the guest memory is unmapped.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* 'mode' is 32 bits wide on these targets, 16 bits elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
    /* Likewise '__seq' is 32 bits on PPC only. */
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3811 
3812 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3813                                                abi_ulong target_addr)
3814 {
3815     struct target_semid64_ds *target_sd;
3816 
3817     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3818         return -TARGET_EFAULT;
3819     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3820         return -TARGET_EFAULT;
3821     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3822     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3823     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3824     unlock_user_struct(target_sd, target_addr, 0);
3825     return 0;
3826 }
3827 
3828 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3829                                                struct semid_ds *host_sd)
3830 {
3831     struct target_semid64_ds *target_sd;
3832 
3833     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3834         return -TARGET_EFAULT;
3835     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3836         return -TARGET_EFAULT;
3837     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3838     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3839     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3840     unlock_user_struct(target_sd, target_addr, 1);
3841     return 0;
3842 }
3843 
/* Guest-layout mirror of the host's struct seminfo (IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3856 
/*
 * Copy a host struct seminfo out to guest memory, byteswapping each
 * field via __put_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3876 
/* Host-side semctl() argument union (glibc does not export one). */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest-side equivalent: the pointer members are guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3890 
3891 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3892                                                abi_ulong target_addr)
3893 {
3894     int nsems;
3895     unsigned short *array;
3896     union semun semun;
3897     struct semid_ds semid_ds;
3898     int i, ret;
3899 
3900     semun.buf = &semid_ds;
3901 
3902     ret = semctl(semid, 0, IPC_STAT, semun);
3903     if (ret == -1)
3904         return get_errno(ret);
3905 
3906     nsems = semid_ds.sem_nsems;
3907 
3908     *host_array = g_try_new(unsigned short, nsems);
3909     if (!*host_array) {
3910         return -TARGET_ENOMEM;
3911     }
3912     array = lock_user(VERIFY_READ, target_addr,
3913                       nsems*sizeof(unsigned short), 1);
3914     if (!array) {
3915         g_free(*host_array);
3916         return -TARGET_EFAULT;
3917     }
3918 
3919     for(i=0; i<nsems; i++) {
3920         __get_user((*host_array)[i], &array[i]);
3921     }
3922     unlock_user(array, target_addr, 0);
3923 
3924     return 0;
3925 }
3926 
3927 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3928                                                unsigned short **host_array)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     array = lock_user(VERIFY_WRITE, target_addr,
3945                       nsems*sizeof(unsigned short), 0);
3946     if (!array)
3947         return -TARGET_EFAULT;
3948 
3949     for(i=0; i<nsems; i++) {
3950         __put_user((*host_array)[i], &array[i]);
3951     }
3952     g_free(*host_array);
3953     unlock_user(array, target_addr, 1);
3954 
3955     return 0;
3956 }
3957 
/*
 * Emulate semctl(2).  target_arg is the guest's union semun passed by
 * value; which member is live depends on cmd.  Returns a target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    /* Strip IPC_64 and other flag bits from the command number. */
    cmd &= 0xff;

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* Round-trip the value array through a host buffer. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            /* Round-trip semid_ds; for pure-read commands the inbound
             * conversion just validates the guest buffer. */
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* These commands ignore the semun argument entirely. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
4027 
/* Guest-layout mirror of struct sembuf (semop operation descriptor). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4033 
4034 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4035                                              abi_ulong target_addr,
4036                                              unsigned nsops)
4037 {
4038     struct target_sembuf *target_sembuf;
4039     int i;
4040 
4041     target_sembuf = lock_user(VERIFY_READ, target_addr,
4042                               nsops*sizeof(struct target_sembuf), 1);
4043     if (!target_sembuf)
4044         return -TARGET_EFAULT;
4045 
4046     for(i=0; i<nsops; i++) {
4047         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4048         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4049         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4050     }
4051 
4052     unlock_user(target_sembuf, target_addr, 0);
4053 
4054     return 0;
4055 }
4056 
4057 #if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
4058     defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)
4059 
4060 /*
4061  * This macro is required to handle the s390 variants, which passes the
4062  * arguments in a different order than default.
4063  */
4064 #ifdef __s390x__
4065 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4066   (__nsops), (__timeout), (__sops)
4067 #else
4068 #define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
4069   (__nsops), 0, (__sops), (__timeout)
4070 #endif
4071 
/*
 * Emulate semop()/semtimedop().  'timeout' is a guest timespec address
 * (0 means block forever); 'time64' selects the 64-bit timespec layout.
 * Returns a target errno.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Match the kernel's per-call operation limit. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
4118 #endif
4119 
/* Guest-layout mirror of struct msqid64_ds (asm-generic layout). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* 32-bit ABIs pad each time field */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* current bytes on queue */
    abi_ulong msg_qnum;               /* messages on queue */
    abi_ulong msg_qbytes;             /* max bytes allowed on queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4143 
4144 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4145                                                abi_ulong target_addr)
4146 {
4147     struct target_msqid_ds *target_md;
4148 
4149     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4150         return -TARGET_EFAULT;
4151     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4152         return -TARGET_EFAULT;
4153     host_md->msg_stime = tswapal(target_md->msg_stime);
4154     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4155     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4156     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4157     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4158     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4159     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4160     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4161     unlock_user_struct(target_md, target_addr, 0);
4162     return 0;
4163 }
4164 
4165 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4166                                                struct msqid_ds *host_md)
4167 {
4168     struct target_msqid_ds *target_md;
4169 
4170     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4171         return -TARGET_EFAULT;
4172     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4173         return -TARGET_EFAULT;
4174     target_md->msg_stime = tswapal(host_md->msg_stime);
4175     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4176     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4177     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4178     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4179     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4180     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4181     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4182     unlock_user_struct(target_md, target_addr, 1);
4183     return 0;
4184 }
4185 
/* Guest-layout mirror of the host's struct msginfo (IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4196 
/*
 * Copy a host struct msginfo out to guest memory, byteswapping each
 * field via __put_user.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
4214 
/*
 * Emulate msgctl(2).  'ptr' is the guest address of the msqid_ds /
 * msginfo buffer (unused for IPC_RMID).  Returns a target errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Strip IPC_64 and other flag bits from the command number. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip msqid_ds; for pure-read commands the inbound
         * conversion just validates the guest buffer. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel interface takes a msqid_ds* but fills a msginfo
         * for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4246 
/* Guest-layout message header: mtype followed by the variable payload. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];   /* actually msgsz bytes in guest memory */
};
4251 
/*
 * Emulate msgsnd(2).  'msgp' is the guest address of a target_msgbuf
 * with msgsz payload bytes.  Returns a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: native 'long' mtype header plus the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): lock_user_struct only maps sizeof(*target_mb); reading
     * msgsz bytes of mtext relies on guest memory being directly mapped --
     * confirm this is safe under DEBUG_REMAP. */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390 sys_ipc variant has no dummy fifth argument. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4292 
4293 #ifdef __NR_ipc
4294 #if defined(__sparc__)
4295 /* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
4296 #define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
4297 #elif defined(__s390x__)
4298 /* The s390 sys_ipc variant has only five parameters.  */
4299 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4300     ((long int[]){(long int)__msgp, __msgtyp})
4301 #else
4302 #define MSGRCV_ARGS(__msgp, __msgtyp) \
4303     ((long int[]){(long int)__msgp, __msgtyp}), 0
4304 #endif
4305 #endif
4306 
/*
 * Emulate msgrcv(2).  'msgp' is the guest address of a target_msgbuf
 * with room for msgsz payload bytes.  On success returns the number of
 * bytes received; otherwise a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: native 'long' mtype header plus the payload. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    /* Prefer the direct syscall; fall back to the multiplexed sys_ipc
     * entry point on hosts that only provide that. */
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret bytes of payload were received; copy them to the guest
         * mtext area, which sits one abi_ulong past msgp. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (EFAULT above returns early),
     * so this check is purely defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4358 
4359 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4360                                                abi_ulong target_addr)
4361 {
4362     struct target_shmid_ds *target_sd;
4363 
4364     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4365         return -TARGET_EFAULT;
4366     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4367         return -TARGET_EFAULT;
4368     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4369     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4370     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4371     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4372     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4373     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4374     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4375     unlock_user_struct(target_sd, target_addr, 0);
4376     return 0;
4377 }
4378 
4379 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4380                                                struct shmid_ds *host_sd)
4381 {
4382     struct target_shmid_ds *target_sd;
4383 
4384     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4385         return -TARGET_EFAULT;
4386     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4387         return -TARGET_EFAULT;
4388     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4389     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4390     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4391     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4392     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4393     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4394     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4395     unlock_user_struct(target_sd, target_addr, 1);
4396     return 0;
4397 }
4398 
/*
 * Guest-ABI view of the host's struct shminfo (IPC_INFO); every field
 * is widened/narrowed to abi_ulong so the layout matches the target.
 */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4406 
4407 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4408                                               struct shminfo *host_shminfo)
4409 {
4410     struct target_shminfo *target_shminfo;
4411     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4412         return -TARGET_EFAULT;
4413     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4414     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4415     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4416     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4417     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4418     unlock_user_struct(target_shminfo, target_addr, 1);
4419     return 0;
4420 }
4421 
/*
 * Guest-ABI view of the host's struct shm_info (SHM_INFO); the
 * abi_ulong fields track the target's word size.
 */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4430 
4431 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4432                                                struct shm_info *host_shm_info)
4433 {
4434     struct target_shm_info *target_shm_info;
4435     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4436         return -TARGET_EFAULT;
4437     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4438     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4439     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4440     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4441     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4442     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4443     unlock_user_struct(target_shm_info, target_addr, 1);
4444     return 0;
4445 }
4446 
4447 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4448 {
4449     struct shmid_ds dsarg;
4450     struct shminfo shminfo;
4451     struct shm_info shm_info;
4452     abi_long ret = -TARGET_EINVAL;
4453 
4454     cmd &= 0xff;
4455 
4456     switch(cmd) {
4457     case IPC_STAT:
4458     case IPC_SET:
4459     case SHM_STAT:
4460         if (target_to_host_shmid_ds(&dsarg, buf))
4461             return -TARGET_EFAULT;
4462         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4463         if (host_to_target_shmid_ds(buf, &dsarg))
4464             return -TARGET_EFAULT;
4465         break;
4466     case IPC_INFO:
4467         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4468         if (host_to_target_shminfo(buf, &shminfo))
4469             return -TARGET_EFAULT;
4470         break;
4471     case SHM_INFO:
4472         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4473         if (host_to_target_shm_info(buf, &shm_info))
4474             return -TARGET_EFAULT;
4475         break;
4476     case IPC_RMID:
4477     case SHM_LOCK:
4478     case SHM_UNLOCK:
4479         ret = get_errno(shmctl(shmid, cmd, NULL));
4480         break;
4481     }
4482 
4483     return ret;
4484 }
4485 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    /* Default: the target's SHMLBA is simply its page size. */
    return TARGET_PAGE_SIZE;
}
#endif
4505 
/*
 * Attach SysV shared memory segment @shmid into the guest address space
 * (shmat(2) for the guest).  Returns the guest address of the mapping,
 * or a negative target errno.
 */
static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
                          abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce target SHMLBA alignment: round down with SHM_RND, else fail. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* SHM_REMAP: mmap_find_vma has already reserved this range. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((intptr_t)host_raddr);
    }
    raddr = h2g((uintptr_t)host_raddr);

    /* Mark the guest pages valid; read-only when SHM_RDONLY was requested. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Record the mapping so do_shmdt() can clear the page flags again. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4589 
4590 static inline abi_long do_shmdt(abi_ulong shmaddr)
4591 {
4592     int i;
4593     abi_long rv;
4594 
4595     /* shmdt pointers are always untagged */
4596 
4597     mmap_lock();
4598 
4599     for (i = 0; i < N_SHM_REGIONS; ++i) {
4600         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4601             shm_regions[i].in_use = false;
4602             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4603             break;
4604         }
4605     }
4606     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4607 
4608     mmap_unlock();
4609 
4610     return rv;
4611 }
4612 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/*
 * Multiplexed SysV IPC syscall.  Decodes the IPCOP_* sub-call from
 * @call and dispatches to the matching do_* helper.
 * do_ipc() must return target values and target errnos.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    /* The top 16 bits of the call number encode the interface version. */
    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /*
                 * Version 0 passes a pointer to a kludge struct bundling
                 * the message buffer pointer and the message type.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second,
                                tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
        break;

    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;

        /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
4733 
4734 /* kernel structure types definitions */
4735 
/*
 * First pass over syscall_types.h (X-macro technique): generate a
 * STRUCT_<name> enumerator for every structure description.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/*
 * Second pass: emit a struct_<name>_def[] argtype description for each
 * regular structure; STRUCT_SPECIAL entries are converted by hand-written
 * code and get no table.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed scratch buffer used when converting ioctl arguments. */
#define MAX_STRUCT_SIZE 4096
4752 
4753 #ifdef CONFIG_FIEMAP
4754 /* So fiemap access checks don't overflow on 32 bit systems.
4755  * This is very slightly smaller than the limit imposed by
4756  * the underlying kernel.
4757  */
4758 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4759                             / sizeof(struct fiemap_extent))
4760 
/*
 * FS_IOC_FIEMAP: convert the variable-length fiemap request between
 * guest and host layouts, growing the conversion buffer when the extent
 * array does not fit in the fixed-size buf_temp.
 * Returns the ioctl result or a negative target errno.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts large enough to overflow the outbufsz calculation. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4840 #endif
4841 
4842 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4843                                 int fd, int cmd, abi_long arg)
4844 {
4845     const argtype *arg_type = ie->arg_type;
4846     int target_size;
4847     void *argptr;
4848     int ret;
4849     struct ifconf *host_ifconf;
4850     uint32_t outbufsz;
4851     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4852     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4853     int target_ifreq_size;
4854     int nb_ifreq;
4855     int free_buf = 0;
4856     int i;
4857     int target_ifc_len;
4858     abi_long target_ifc_buf;
4859     int host_ifc_len;
4860     char *host_ifc_buf;
4861 
4862     assert(arg_type[0] == TYPE_PTR);
4863     assert(ie->access == IOC_RW);
4864 
4865     arg_type++;
4866     target_size = thunk_type_size(arg_type, 0);
4867 
4868     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4869     if (!argptr)
4870         return -TARGET_EFAULT;
4871     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4872     unlock_user(argptr, arg, 0);
4873 
4874     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4875     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4876     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4877 
4878     if (target_ifc_buf != 0) {
4879         target_ifc_len = host_ifconf->ifc_len;
4880         nb_ifreq = target_ifc_len / target_ifreq_size;
4881         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4882 
4883         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4884         if (outbufsz > MAX_STRUCT_SIZE) {
4885             /*
4886              * We can't fit all the extents into the fixed size buffer.
4887              * Allocate one that is large enough and use it instead.
4888              */
4889             host_ifconf = g_try_malloc(outbufsz);
4890             if (!host_ifconf) {
4891                 return -TARGET_ENOMEM;
4892             }
4893             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4894             free_buf = 1;
4895         }
4896         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4897 
4898         host_ifconf->ifc_len = host_ifc_len;
4899     } else {
4900       host_ifc_buf = NULL;
4901     }
4902     host_ifconf->ifc_buf = host_ifc_buf;
4903 
4904     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4905     if (!is_error(ret)) {
4906 	/* convert host ifc_len to target ifc_len */
4907 
4908         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4909         target_ifc_len = nb_ifreq * target_ifreq_size;
4910         host_ifconf->ifc_len = target_ifc_len;
4911 
4912 	/* restore target ifc_buf */
4913 
4914         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4915 
4916 	/* copy struct ifconf to target user */
4917 
4918         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4919         if (!argptr)
4920             return -TARGET_EFAULT;
4921         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4922         unlock_user(argptr, arg, target_size);
4923 
4924         if (target_ifc_buf != 0) {
4925             /* copy ifreq[] to target user */
4926             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4927             for (i = 0; i < nb_ifreq ; i++) {
4928                 thunk_convert(argptr + i * target_ifreq_size,
4929                               host_ifc_buf + i * sizeof(struct ifreq),
4930                               ifreq_arg_type, THUNK_TARGET);
4931             }
4932             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4933         }
4934     }
4935 
4936     if (free_buf) {
4937         g_free(host_ifconf);
4938     }
4939 
4940     return ret;
4941 }
4942 
4943 #if defined(CONFIG_USBFS)
4944 #if HOST_LONG_BITS > 64
4945 #error USBDEVFS thunks do not support >64 bit hosts yet.
4946 #endif
/*
 * Bookkeeping for one guest-submitted USB request block.  The struct is
 * keyed by target_urb_adr: insert passes the struct pointer itself as a
 * GHashTable key and lookup passes a pointer to a uint64_t, which works
 * because target_urb_adr is the first member.
 */
struct live_urb {
    uint64_t target_urb_adr;        /* guest address of the usbdevfs_urb */
    uint64_t target_buf_adr;        /* guest address of the data buffer */
    char *target_buf_ptr;           /* host view of the locked guest buffer */
    struct usbdevfs_urb host_urb;   /* host copy handed to the kernel */
};
4953 
4954 static GHashTable *usbdevfs_urb_hashtable(void)
4955 {
4956     static GHashTable *urb_hashtable;
4957 
4958     if (!urb_hashtable) {
4959         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4960     }
4961     return urb_hashtable;
4962 }
4963 
4964 static void urb_hashtable_insert(struct live_urb *urb)
4965 {
4966     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4967     g_hash_table_insert(urb_hashtable, urb, urb);
4968 }
4969 
4970 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
4971 {
4972     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4973     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
4974 }
4975 
4976 static void urb_hashtable_remove(struct live_urb *urb)
4977 {
4978     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
4979     g_hash_table_remove(urb_hashtable, urb);
4980 }
4981 
/*
 * USBDEVFS_REAPURB{,NDELAY}: reap one completed URB from the kernel,
 * copy its results back into the guest's usbdevfs_urb, and write the
 * guest URB address through @arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host urb pointer into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* The kernel returned &lurb->host_urb; recover the enclosing live_urb. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Unlock the guest data buffer, copying buffer_length bytes back. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5041 
5042 static abi_long
5043 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5044                              uint8_t *buf_temp __attribute__((unused)),
5045                              int fd, int cmd, abi_long arg)
5046 {
5047     struct live_urb *lurb;
5048 
5049     /* map target address back to host URB with metadata. */
5050     lurb = urb_hashtable_lookup(arg);
5051     if (!lurb) {
5052         return -TARGET_EFAULT;
5053     }
5054     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5055 }
5056 
/*
 * USBDEVFS_SUBMITURB: build a host copy of the guest's usbdevfs_urb,
 * lock the guest data buffer for the transfer's direction, and submit
 * it to the kernel.  On success the live_urb stays locked/tracked until
 * it is reaped or discarded.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* The guest URB address doubles as the hashtable key. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: drop the buffer lock without copy-back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5117 #endif /* CONFIG_USBFS */
5118 
5119 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5120                             int cmd, abi_long arg)
5121 {
5122     void *argptr;
5123     struct dm_ioctl *host_dm;
5124     abi_long guest_data;
5125     uint32_t guest_data_size;
5126     int target_size;
5127     const argtype *arg_type = ie->arg_type;
5128     abi_long ret;
5129     void *big_buf = NULL;
5130     char *host_data;
5131 
5132     arg_type++;
5133     target_size = thunk_type_size(arg_type, 0);
5134     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5135     if (!argptr) {
5136         ret = -TARGET_EFAULT;
5137         goto out;
5138     }
5139     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5140     unlock_user(argptr, arg, 0);
5141 
5142     /* buf_temp is too small, so fetch things into a bigger buffer */
5143     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5144     memcpy(big_buf, buf_temp, target_size);
5145     buf_temp = big_buf;
5146     host_dm = big_buf;
5147 
5148     guest_data = arg + host_dm->data_start;
5149     if ((guest_data - arg) < 0) {
5150         ret = -TARGET_EINVAL;
5151         goto out;
5152     }
5153     guest_data_size = host_dm->data_size - host_dm->data_start;
5154     host_data = (char*)host_dm + host_dm->data_start;
5155 
5156     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5157     if (!argptr) {
5158         ret = -TARGET_EFAULT;
5159         goto out;
5160     }
5161 
5162     switch (ie->host_cmd) {
5163     case DM_REMOVE_ALL:
5164     case DM_LIST_DEVICES:
5165     case DM_DEV_CREATE:
5166     case DM_DEV_REMOVE:
5167     case DM_DEV_SUSPEND:
5168     case DM_DEV_STATUS:
5169     case DM_DEV_WAIT:
5170     case DM_TABLE_STATUS:
5171     case DM_TABLE_CLEAR:
5172     case DM_TABLE_DEPS:
5173     case DM_LIST_VERSIONS:
5174         /* no input data */
5175         break;
5176     case DM_DEV_RENAME:
5177     case DM_DEV_SET_GEOMETRY:
5178         /* data contains only strings */
5179         memcpy(host_data, argptr, guest_data_size);
5180         break;
5181     case DM_TARGET_MSG:
5182         memcpy(host_data, argptr, guest_data_size);
5183         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5184         break;
5185     case DM_TABLE_LOAD:
5186     {
5187         void *gspec = argptr;
5188         void *cur_data = host_data;
5189         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5190         int spec_size = thunk_type_size(arg_type, 0);
5191         int i;
5192 
5193         for (i = 0; i < host_dm->target_count; i++) {
5194             struct dm_target_spec *spec = cur_data;
5195             uint32_t next;
5196             int slen;
5197 
5198             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5199             slen = strlen((char*)gspec + spec_size) + 1;
5200             next = spec->next;
5201             spec->next = sizeof(*spec) + slen;
5202             strcpy((char*)&spec[1], gspec + spec_size);
5203             gspec += next;
5204             cur_data += spec->next;
5205         }
5206         break;
5207     }
5208     default:
5209         ret = -TARGET_EINVAL;
5210         unlock_user(argptr, guest_data, 0);
5211         goto out;
5212     }
5213     unlock_user(argptr, guest_data, 0);
5214 
5215     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5216     if (!is_error(ret)) {
5217         guest_data = arg + host_dm->data_start;
5218         guest_data_size = host_dm->data_size - host_dm->data_start;
5219         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5220         switch (ie->host_cmd) {
5221         case DM_REMOVE_ALL:
5222         case DM_DEV_CREATE:
5223         case DM_DEV_REMOVE:
5224         case DM_DEV_RENAME:
5225         case DM_DEV_SUSPEND:
5226         case DM_DEV_STATUS:
5227         case DM_TABLE_LOAD:
5228         case DM_TABLE_CLEAR:
5229         case DM_TARGET_MSG:
5230         case DM_DEV_SET_GEOMETRY:
5231             /* no return data */
5232             break;
5233         case DM_LIST_DEVICES:
5234         {
5235             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5236             uint32_t remaining_data = guest_data_size;
5237             void *cur_data = argptr;
5238             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5239             int nl_size = 12; /* can't use thunk_size due to alignment */
5240 
5241             while (1) {
5242                 uint32_t next = nl->next;
5243                 if (next) {
5244                     nl->next = nl_size + (strlen(nl->name) + 1);
5245                 }
5246                 if (remaining_data < nl->next) {
5247                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5248                     break;
5249                 }
5250                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5251                 strcpy(cur_data + nl_size, nl->name);
5252                 cur_data += nl->next;
5253                 remaining_data -= nl->next;
5254                 if (!next) {
5255                     break;
5256                 }
5257                 nl = (void*)nl + next;
5258             }
5259             break;
5260         }
5261         case DM_DEV_WAIT:
5262         case DM_TABLE_STATUS:
5263         {
5264             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5265             void *cur_data = argptr;
5266             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5267             int spec_size = thunk_type_size(arg_type, 0);
5268             int i;
5269 
5270             for (i = 0; i < host_dm->target_count; i++) {
5271                 uint32_t next = spec->next;
5272                 int slen = strlen((char*)&spec[1]) + 1;
5273                 spec->next = (cur_data - argptr) + spec_size + slen;
5274                 if (guest_data_size < spec->next) {
5275                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5276                     break;
5277                 }
5278                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5279                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5280                 cur_data = argptr + spec->next;
5281                 spec = (void*)host_dm + host_dm->data_start + next;
5282             }
5283             break;
5284         }
5285         case DM_TABLE_DEPS:
5286         {
5287             void *hdata = (void*)host_dm + host_dm->data_start;
5288             int count = *(uint32_t*)hdata;
5289             uint64_t *hdev = hdata + 8;
5290             uint64_t *gdev = argptr + 8;
5291             int i;
5292 
5293             *(uint32_t*)argptr = tswap32(count);
5294             for (i = 0; i < count; i++) {
5295                 *gdev = tswap64(*hdev);
5296                 gdev++;
5297                 hdev++;
5298             }
5299             break;
5300         }
5301         case DM_LIST_VERSIONS:
5302         {
5303             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5304             uint32_t remaining_data = guest_data_size;
5305             void *cur_data = argptr;
5306             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5307             int vers_size = thunk_type_size(arg_type, 0);
5308 
5309             while (1) {
5310                 uint32_t next = vers->next;
5311                 if (next) {
5312                     vers->next = vers_size + (strlen(vers->name) + 1);
5313                 }
5314                 if (remaining_data < vers->next) {
5315                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5316                     break;
5317                 }
5318                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5319                 strcpy(cur_data + vers_size, vers->name);
5320                 cur_data += vers->next;
5321                 remaining_data -= vers->next;
5322                 if (!next) {
5323                     break;
5324                 }
5325                 vers = (void*)vers + next;
5326             }
5327             break;
5328         }
5329         default:
5330             unlock_user(argptr, guest_data, 0);
5331             ret = -TARGET_EINVAL;
5332             goto out;
5333         }
5334         unlock_user(argptr, guest_data, guest_data_size);
5335 
5336         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5337         if (!argptr) {
5338             ret = -TARGET_EFAULT;
5339             goto out;
5340         }
5341         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5342         unlock_user(argptr, arg, target_size);
5343     }
5344 out:
5345     g_free(big_buf);
5346     return ret;
5347 }
5348 
/*
 * Emulate BLKPG: the struct blkpg_ioctl_arg argument contains a 'data'
 * pointer to a struct blkpg_partition, so both levels must be converted
 * from the guest layout by hand before calling the host ioctl.
 * Returns 0/-errno in target conventions.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;  /* skip the TYPE_PTR marker; arg_type now describes the struct */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5401 
/*
 * Emulate route ioctls taking a struct rtentry (e.g. SIOCADDRT/SIOCDELRT).
 * The struct embeds an rt_dev string pointer that the generic thunk
 * machinery cannot express, so the struct is converted field by field and
 * the device-name string is locked into host memory separately.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Non-NULL device name: lock the guest string in place. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        /* All other fields are converted generically. */
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* rtentry always contains an rt_dev field, so the loop must have set
       both pointers even if the guest passed a NULL device name. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* Release the locked device-name string; nothing to copy back. */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5467 
5468 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5469                                      int fd, int cmd, abi_long arg)
5470 {
5471     int sig = target_to_host_signal(arg);
5472     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5473 }
5474 
5475 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5476                                     int fd, int cmd, abi_long arg)
5477 {
5478     struct timeval tv;
5479     abi_long ret;
5480 
5481     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5482     if (is_error(ret)) {
5483         return ret;
5484     }
5485 
5486     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5487         if (copy_to_user_timeval(arg, &tv)) {
5488             return -TARGET_EFAULT;
5489         }
5490     } else {
5491         if (copy_to_user_timeval64(arg, &tv)) {
5492             return -TARGET_EFAULT;
5493         }
5494     }
5495 
5496     return ret;
5497 }
5498 
5499 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5500                                       int fd, int cmd, abi_long arg)
5501 {
5502     struct timespec ts;
5503     abi_long ret;
5504 
5505     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5506     if (is_error(ret)) {
5507         return ret;
5508     }
5509 
5510     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5511         if (host_to_target_timespec(arg, &ts)) {
5512             return -TARGET_EFAULT;
5513         }
5514     } else{
5515         if (host_to_target_timespec64(arg, &ts)) {
5516             return -TARGET_EFAULT;
5517         }
5518     }
5519 
5520     return ret;
5521 }
5522 
5523 #ifdef TIOCGPTPEER
5524 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5525                                      int fd, int cmd, abi_long arg)
5526 {
5527     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5528     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5529 }
5530 #endif
5531 
5532 #ifdef HAVE_DRM_H
5533 
/*
 * Release the guest name/date/desc buffers locked by
 * target_to_host_drmversion().  When @copy is true, the lengths worth of
 * data the host ioctl wrote are copied back to the guest; otherwise the
 * buffers are released without a copy-back.
 */
static void unlock_drm_version(struct drm_version *host_ver,
                               struct target_drm_version *target_ver,
                               bool copy)
{
    unlock_user(host_ver->name, target_ver->name,
                                copy ? host_ver->name_len : 0);
    unlock_user(host_ver->date, target_ver->date,
                                copy ? host_ver->date_len : 0);
    unlock_user(host_ver->desc, target_ver->desc,
                                copy ? host_ver->desc_len : 0);
}
5545 
/*
 * Prepare a host struct drm_version from the guest's request: lock each
 * guest-supplied string buffer (name/date/desc) into host memory so the
 * host ioctl can write into it directly.  A zero length leaves the
 * corresponding pointer NULL.  Returns 0 on success or -EFAULT, releasing
 * any buffers already locked on the error path.
 *
 * NOTE(review): the buffer pointers and the length passed to lock_user
 * are taken from the target struct without byte-swapping (only the
 * host_ver->*_len copies made via __get_user are swapped) — presumably
 * fine for same-endianness host/guest; verify for cross-endian setups.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5583 
/*
 * Copy the version numbers and string lengths returned by the host
 * DRM_IOCTL_VERSION back into the guest struct, then release the locked
 * string buffers with copy-back enabled so the guest sees their contents.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5596 
/*
 * Emulate DRM ioctls that need hand conversion.  Only DRM_IOCTL_VERSION
 * is handled here: its struct drm_version contains three pointer/length
 * pairs that the generic thunk machinery cannot express.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        /* NOTE(review): this may return -EFAULT (a host errno) rather
           than -TARGET_EFAULT; harmless where the two values coincide,
           but worth confirming for targets where they differ. */
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                /* ioctl failed: discard whatever was written. */
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
5624 
5625 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5626                                            struct drm_i915_getparam *gparam,
5627                                            int fd, abi_long arg)
5628 {
5629     abi_long ret;
5630     int value;
5631     struct target_drm_i915_getparam *target_gparam;
5632 
5633     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5634         return -TARGET_EFAULT;
5635     }
5636 
5637     __get_user(gparam->param, &target_gparam->param);
5638     gparam->value = &value;
5639     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5640     put_user_s32(value, target_gparam->value);
5641 
5642     unlock_user_struct(target_gparam, arg, 0);
5643     return ret;
5644 }
5645 
5646 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5647                                   int fd, int cmd, abi_long arg)
5648 {
5649     switch (ie->host_cmd) {
5650     case DRM_IOCTL_I915_GETPARAM:
5651         return do_ioctl_drm_i915_getparam(ie,
5652                                           (struct drm_i915_getparam *)buf_temp,
5653                                           fd, arg);
5654     default:
5655         return -TARGET_ENOSYS;
5656     }
5657 }
5658 
5659 #endif
5660 
/*
 * Emulate TUNSETTXFILTER: struct tun_filter carries a variable-length
 * trailing array of MAC addresses, which the generic thunk machinery
 * cannot describe, so the conversion is done by hand here.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    /* Fetch and byte-swap the fixed-size header (flags + count). */
    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* Reject filters too large for the fixed conversion buffer. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        /* MAC addresses are raw bytes: no byte-swapping needed. */
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5696 
/*
 * Table of all ioctls we can translate, generated by expanding ioctls.h
 * with the three macros below.  IOCTL maps a command to the generic
 * thunk-based conversion, IOCTL_SPECIAL attaches a custom do_ioctl_*
 * handler, and IOCTL_IGNORE records a target command with no host
 * equivalent (host_cmd == 0).  The table ends with an all-zero sentinel.
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5707 
/* ??? Implement proper locking for ioctls.  */
/*
 * do_ioctl(): central ioctl dispatcher.  Looks the guest command up in
 * ioctl_entries, delegates to a custom handler when one is registered,
 * and otherwise converts the argument generically according to the
 * entry's declared type and access direction.
 * Must return target values and target errnos.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search for the guest command number. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Entry has a custom emulation routine (IOCTL_SPECIAL). */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar or opaque-pointer argument: pass through unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a described struct: convert via the thunk layer. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes only: convert the result back to the guest. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads only: convert the guest argument first. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Kernel reads and writes: convert in both directions. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
5799 
/* Translation table for termios input-mode (c_iflag) bits. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5818 
5819 static const bitmask_transtbl oflag_tbl[] = {
5820 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5821 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5822 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5823 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5824 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5825 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5826 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5827 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5828 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5829 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5830 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5831 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5832 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5833 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5834 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5835 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5836 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5837 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5838 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5839 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5840 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5841 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5842 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5843 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5844 	{ 0, 0, 0, 0 }
5845 };
5846 
5847 static const bitmask_transtbl cflag_tbl[] = {
5848 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5849 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5850 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5851 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5852 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5853 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5854 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5855 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5856 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5857 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5858 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5859 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5860 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5861 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5862 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5863 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5864 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5865 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5866 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5867 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5868 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5869 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5870 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5871 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5872 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5873 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5874 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5875 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5876 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5877 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5878 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5879 	{ 0, 0, 0, 0 }
5880 };
5881 
5882 static const bitmask_transtbl lflag_tbl[] = {
5883   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5884   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5885   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5886   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5887   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5888   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5889   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5890   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5891   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5892   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5893   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5894   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5895   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5896   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5897   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5898   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5899   { 0, 0, 0, 0 }
5900 };
5901 
/*
 * Convert a guest struct target_termios (src) to the host layout (dst).
 * Flag words are translated bit-by-bit through the tables above; the
 * control-character array is remapped slot-by-slot because the TARGET_V*
 * indices differ from the host V* indices on some architectures.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Zero first: any host slot without a target counterpart stays 0. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5936 
/*
 * Convert a host struct host_termios (src) to the guest layout (dst):
 * inverse of target_to_host_termios().  Flag words go through the same
 * bitmask tables in the opposite direction and the control-character
 * slots are remapped to the guest's TARGET_V* indices.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Zero first: any target slot without a host counterpart stays 0. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
5971 
/*
 * Thunk descriptor for struct termios: converted by the two handwritten
 * routines above instead of the generic field-by-field machinery.
 */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
5978 
5979 /* If the host does not provide these bits, they may be safely discarded. */
5980 #ifndef MAP_SYNC
5981 #define MAP_SYNC 0
5982 #endif
5983 #ifndef MAP_UNINITIALIZED
5984 #define MAP_UNINITIALIZED 0
5985 #endif
5986 
/* One-to-one target<->host translation of the optional mmap flag bits;
   the MAP_TYPE field is handled separately in do_mmap(). */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
    { 0, 0, 0, 0 }
};
6013 
6014 /*
6015  * Arrange for legacy / undefined architecture specific flags to be
6016  * ignored by mmap handling code.
6017  */
6018 #ifndef TARGET_MAP_32BIT
6019 #define TARGET_MAP_32BIT 0
6020 #endif
6021 #ifndef TARGET_MAP_HUGE_2MB
6022 #define TARGET_MAP_HUGE_2MB 0
6023 #endif
6024 #ifndef TARGET_MAP_HUGE_1GB
6025 #define TARGET_MAP_HUGE_1GB 0
6026 #endif
6027 
/*
 * Perform a guest mmap: validate the MAP_TYPE field by hand (rejecting
 * unknown types and flags invalid for MAP_SHARED_VALIDATE), translate
 * the remaining one-to-one bits via mmap_flags_tbl, then hand off to
 * target_mmap().  Returns the mapped address or a target errno.
 */
static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
                        int target_flags, int fd, off_t offset)
{
    /*
     * The historical set of flags that all mmap types implicitly support.
     */
    enum {
        TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
                               | TARGET_MAP_PRIVATE
                               | TARGET_MAP_FIXED
                               | TARGET_MAP_ANONYMOUS
                               | TARGET_MAP_DENYWRITE
                               | TARGET_MAP_EXECUTABLE
                               | TARGET_MAP_UNINITIALIZED
                               | TARGET_MAP_GROWSDOWN
                               | TARGET_MAP_LOCKED
                               | TARGET_MAP_NORESERVE
                               | TARGET_MAP_POPULATE
                               | TARGET_MAP_NONBLOCK
                               | TARGET_MAP_STACK
                               | TARGET_MAP_HUGETLB
                               | TARGET_MAP_32BIT
                               | TARGET_MAP_HUGE_2MB
                               | TARGET_MAP_HUGE_1GB
    };
    int host_flags;

    switch (target_flags & TARGET_MAP_TYPE) {
    case TARGET_MAP_PRIVATE:
        host_flags = MAP_PRIVATE;
        break;
    case TARGET_MAP_SHARED:
        host_flags = MAP_SHARED;
        break;
    case TARGET_MAP_SHARED_VALIDATE:
        /*
         * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
         * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
         */
        if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
            return -TARGET_EOPNOTSUPP;
        }
        host_flags = MAP_SHARED_VALIDATE;
        if (target_flags & TARGET_MAP_SYNC) {
            host_flags |= MAP_SYNC;
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    /* Translate the remaining one-to-one flag bits. */
    host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);

    return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
}
6082 
6083 /*
6084  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6085  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6086  */
6087 #if defined(TARGET_I386)
6088 
6089 /* NOTE: there is really one LDT for all the threads */
6090 static uint8_t *ldt_table;
6091 
6092 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6093 {
6094     int size;
6095     void *p;
6096 
6097     if (!ldt_table)
6098         return 0;
6099     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6100     if (size > bytecount)
6101         size = bytecount;
6102     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6103     if (!p)
6104         return -TARGET_EFAULT;
6105     /* ??? Should this by byteswapped?  */
6106     memcpy(p, ldt_table, size);
6107     unlock_user(p, ptr, size);
6108     return size;
6109 }
6110 
/* XXX: add locking support */
/*
 * Implement the write half of modify_ldt(2): read a target descriptor
 * request from guest memory at 'ptr', validate it, and install the
 * packed x86 descriptor into the emulated LDT (allocated on first use).
 * 'oldmode' selects the legacy modify_ldt(func == 1) semantics.
 * Returns 0 on success or a negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Copy the request in, fixing up guest byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (bit layout mirrors the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI has no long-mode bit. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /*
     * contents == 3 is only accepted in new mode and only when the
     * segment is marked not-present (same checks as the kernel).
     */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit descriptor words in hardware layout. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    /* The "useable" bit exists only in the new-mode encoding. */
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes, stored as two 32-bit target words. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6202 
6203 /* specific and weird i386 syscalls */
6204 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6205                               unsigned long bytecount)
6206 {
6207     abi_long ret;
6208 
6209     switch (func) {
6210     case 0:
6211         ret = read_ldt(ptr, bytecount);
6212         break;
6213     case 1:
6214         ret = write_ldt(env, ptr, bytecount, 1);
6215         break;
6216     case 0x11:
6217         ret = write_ldt(env, ptr, bytecount, 0);
6218         break;
6219     default:
6220         ret = -TARGET_ENOSYS;
6221         break;
6222     }
6223     return ret;
6224 }
6225 
6226 #if defined(TARGET_ABI32)
/*
 * Implement set_thread_area(2): install a TLS descriptor into the
 * emulated GDT.  An entry_number of -1 asks us to pick the first free
 * TLS slot and report it back to the guest.  Returns 0 on success or a
 * negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Copy the request in, fixing up guest byte order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "allocate a free TLS slot and tell the guest which". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Also rejects the case where no free slot was found above. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (bit layout mirrors the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI has no long-mode bit. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    /* contents == 3 is only accepted when marked not-present. */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the two 32-bit descriptor words in hardware layout. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6311 
/*
 * Implement get_thread_area(2): read the TLS descriptor selected by the
 * guest's entry_number out of the emulated GDT, unpack the hardware
 * descriptor words back into user_desc form, and write the result to
 * guest memory.  Returns 0 on success or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS range of the GDT may be inspected. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Invert the packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    /* 32-bit ABI has no long-mode bit. */
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Reassemble the user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    /* Write the result back in guest byte order. */
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6358 
/* arch_prctl is not provided to 32-bit (TARGET_ABI32) guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6363 #else
6364 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6365 {
6366     abi_long ret = 0;
6367     abi_ulong val;
6368     int idx;
6369 
6370     switch(code) {
6371     case TARGET_ARCH_SET_GS:
6372     case TARGET_ARCH_SET_FS:
6373         if (code == TARGET_ARCH_SET_GS)
6374             idx = R_GS;
6375         else
6376             idx = R_FS;
6377         cpu_x86_load_seg(env, idx, 0);
6378         env->segs[idx].base = addr;
6379         break;
6380     case TARGET_ARCH_GET_GS:
6381     case TARGET_ARCH_GET_FS:
6382         if (code == TARGET_ARCH_GET_GS)
6383             idx = R_GS;
6384         else
6385             idx = R_FS;
6386         val = env->segs[idx].base;
6387         if (put_user(val, addr, abi_ulong))
6388             ret = -TARGET_EFAULT;
6389         break;
6390     default:
6391         ret = -TARGET_EINVAL;
6392         break;
6393     }
6394     return ret;
6395 }
#endif /* defined(TARGET_ABI32) */
6397 #endif /* defined(TARGET_I386) */
6398 
6399 /*
6400  * These constants are generic.  Supply any that are missing from the host.
6401  */
6402 #ifndef PR_SET_NAME
6403 # define PR_SET_NAME    15
6404 # define PR_GET_NAME    16
6405 #endif
6406 #ifndef PR_SET_FP_MODE
6407 # define PR_SET_FP_MODE 45
6408 # define PR_GET_FP_MODE 46
6409 # define PR_FP_MODE_FR   (1 << 0)
6410 # define PR_FP_MODE_FRE  (1 << 1)
6411 #endif
6412 #ifndef PR_SVE_SET_VL
6413 # define PR_SVE_SET_VL  50
6414 # define PR_SVE_GET_VL  51
6415 # define PR_SVE_VL_LEN_MASK  0xffff
6416 # define PR_SVE_VL_INHERIT   (1 << 17)
6417 #endif
6418 #ifndef PR_PAC_RESET_KEYS
6419 # define PR_PAC_RESET_KEYS  54
6420 # define PR_PAC_APIAKEY   (1 << 0)
6421 # define PR_PAC_APIBKEY   (1 << 1)
6422 # define PR_PAC_APDAKEY   (1 << 2)
6423 # define PR_PAC_APDBKEY   (1 << 3)
6424 # define PR_PAC_APGAKEY   (1 << 4)
6425 #endif
6426 #ifndef PR_SET_TAGGED_ADDR_CTRL
6427 # define PR_SET_TAGGED_ADDR_CTRL 55
6428 # define PR_GET_TAGGED_ADDR_CTRL 56
6429 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6430 #endif
6431 #ifndef PR_MTE_TCF_SHIFT
6432 # define PR_MTE_TCF_SHIFT       1
6433 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6434 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6435 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6436 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6437 # define PR_MTE_TAG_SHIFT       3
6438 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6439 #endif
6440 #ifndef PR_SET_IO_FLUSHER
6441 # define PR_SET_IO_FLUSHER 57
6442 # define PR_GET_IO_FLUSHER 58
6443 #endif
6444 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6445 # define PR_SET_SYSCALL_USER_DISPATCH 59
6446 #endif
6447 #ifndef PR_SME_SET_VL
6448 # define PR_SME_SET_VL  63
6449 # define PR_SME_GET_VL  64
6450 # define PR_SME_VL_LEN_MASK  0xffff
6451 # define PR_SME_VL_INHERIT   (1 << 17)
6452 #endif
6453 
6454 #include "target_prctl.h"
6455 
/* Fallback for zero-argument prctl options a target does not implement. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6460 
/* Fallback for one-argument prctl options a target does not implement. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6465 
6466 #ifndef do_prctl_get_fp_mode
6467 #define do_prctl_get_fp_mode do_prctl_inval0
6468 #endif
6469 #ifndef do_prctl_set_fp_mode
6470 #define do_prctl_set_fp_mode do_prctl_inval1
6471 #endif
6472 #ifndef do_prctl_sve_get_vl
6473 #define do_prctl_sve_get_vl do_prctl_inval0
6474 #endif
6475 #ifndef do_prctl_sve_set_vl
6476 #define do_prctl_sve_set_vl do_prctl_inval1
6477 #endif
6478 #ifndef do_prctl_reset_keys
6479 #define do_prctl_reset_keys do_prctl_inval1
6480 #endif
6481 #ifndef do_prctl_set_tagged_addr_ctrl
6482 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6483 #endif
6484 #ifndef do_prctl_get_tagged_addr_ctrl
6485 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6486 #endif
6487 #ifndef do_prctl_get_unalign
6488 #define do_prctl_get_unalign do_prctl_inval1
6489 #endif
6490 #ifndef do_prctl_set_unalign
6491 #define do_prctl_set_unalign do_prctl_inval1
6492 #endif
6493 #ifndef do_prctl_sme_get_vl
6494 #define do_prctl_sme_get_vl do_prctl_inval0
6495 #endif
6496 #ifndef do_prctl_sme_set_vl
6497 #define do_prctl_sme_set_vl do_prctl_inval1
6498 #endif
6499 
/*
 * Implement prctl(2) for the guest.  Options fall into four groups:
 * those needing argument translation (signals, name buffers), those
 * delegated to per-target hooks (FP mode, SVE/SME vector length, PAC,
 * tagged addresses, unaligned access), those passed straight through to
 * the host, and those deliberately rejected.  Returns the host result
 * converted to a target errno where appropriate.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* The signal number must be translated back to the target's. */
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The kernel's task name buffer is 16 bytes (incl. NUL). */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    /* Per-target hooks; default to do_prctl_inval* when unimplemented. */
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        /* Unused arguments must be zero, as in the kernel. */
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6631 
6632 #define NEW_STACK_SIZE 0x40000
6633 
6634 
/* Serializes thread creation so that setup in do_fork() appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Parent/child handoff block for thread-style clone: lives on the
 * parent's stack in do_fork() and is passed to clone_func().
 */
typedef struct {
    CPUArchState *env;        /* CPU state for the new thread */
    pthread_mutex_t mutex;    /* protects the cond handshake below */
    pthread_cond_t cond;      /* signalled by the child once it is ready */
    pthread_t thread;         /* host thread handle */
    uint32_t tid;             /* child's host TID, filled in by the child */
    abi_ulong child_tidptr;   /* guest addr for TID (CLONE_CHILD_SETTID) */
    abi_ulong parent_tidptr;  /* guest addr for TID (CLONE_PARENT_SETTID) */
    sigset_t sigmask;         /* signal mask the child should restore */
} new_thread_info;
6646 
/*
 * Start routine for threads created by do_fork() with CLONE_VM.
 * Registers the host thread with RCU and TCG, publishes the TID to the
 * requested guest addresses, restores the signal mask, wakes the
 * waiting parent, and then enters cpu_loop(), never returning.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6680 
6681 /* do_fork() Must return host values and target errnos (unlike most
6682    do_*() functions). */
6683 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6684                    abi_ulong parent_tidptr, target_ulong newtls,
6685                    abi_ulong child_tidptr)
6686 {
6687     CPUState *cpu = env_cpu(env);
6688     int ret;
6689     TaskState *ts;
6690     CPUState *new_cpu;
6691     CPUArchState *new_env;
6692     sigset_t sigmask;
6693 
6694     flags &= ~CLONE_IGNORED_FLAGS;
6695 
6696     /* Emulate vfork() with fork() */
6697     if (flags & CLONE_VFORK)
6698         flags &= ~(CLONE_VFORK | CLONE_VM);
6699 
6700     if (flags & CLONE_VM) {
6701         TaskState *parent_ts = (TaskState *)cpu->opaque;
6702         new_thread_info info;
6703         pthread_attr_t attr;
6704 
6705         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6706             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6707             return -TARGET_EINVAL;
6708         }
6709 
6710         ts = g_new0(TaskState, 1);
6711         init_task_state(ts);
6712 
6713         /* Grab a mutex so that thread setup appears atomic.  */
6714         pthread_mutex_lock(&clone_lock);
6715 
6716         /*
6717          * If this is our first additional thread, we need to ensure we
6718          * generate code for parallel execution and flush old translations.
6719          * Do this now so that the copy gets CF_PARALLEL too.
6720          */
6721         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6722             cpu->tcg_cflags |= CF_PARALLEL;
6723             tb_flush(cpu);
6724         }
6725 
6726         /* we create a new CPU instance. */
6727         new_env = cpu_copy(env);
6728         /* Init regs that differ from the parent.  */
6729         cpu_clone_regs_child(new_env, newsp, flags);
6730         cpu_clone_regs_parent(env, flags);
6731         new_cpu = env_cpu(new_env);
6732         new_cpu->opaque = ts;
6733         ts->bprm = parent_ts->bprm;
6734         ts->info = parent_ts->info;
6735         ts->signal_mask = parent_ts->signal_mask;
6736 
6737         if (flags & CLONE_CHILD_CLEARTID) {
6738             ts->child_tidptr = child_tidptr;
6739         }
6740 
6741         if (flags & CLONE_SETTLS) {
6742             cpu_set_tls (new_env, newtls);
6743         }
6744 
6745         memset(&info, 0, sizeof(info));
6746         pthread_mutex_init(&info.mutex, NULL);
6747         pthread_mutex_lock(&info.mutex);
6748         pthread_cond_init(&info.cond, NULL);
6749         info.env = new_env;
6750         if (flags & CLONE_CHILD_SETTID) {
6751             info.child_tidptr = child_tidptr;
6752         }
6753         if (flags & CLONE_PARENT_SETTID) {
6754             info.parent_tidptr = parent_tidptr;
6755         }
6756 
6757         ret = pthread_attr_init(&attr);
6758         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6759         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6760         /* It is not safe to deliver signals until the child has finished
6761            initializing, so temporarily block all signals.  */
6762         sigfillset(&sigmask);
6763         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6764         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6765 
6766         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6767         /* TODO: Free new CPU state if thread creation failed.  */
6768 
6769         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6770         pthread_attr_destroy(&attr);
6771         if (ret == 0) {
6772             /* Wait for the child to initialize.  */
6773             pthread_cond_wait(&info.cond, &info.mutex);
6774             ret = info.tid;
6775         } else {
6776             ret = -1;
6777         }
6778         pthread_mutex_unlock(&info.mutex);
6779         pthread_cond_destroy(&info.cond);
6780         pthread_mutex_destroy(&info.mutex);
6781         pthread_mutex_unlock(&clone_lock);
6782     } else {
6783         /* if no CLONE_VM, we consider it is a fork */
6784         if (flags & CLONE_INVALID_FORK_FLAGS) {
6785             return -TARGET_EINVAL;
6786         }
6787 
6788         /* We can't support custom termination signals */
6789         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6790             return -TARGET_EINVAL;
6791         }
6792 
6793 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6794         if (flags & CLONE_PIDFD) {
6795             return -TARGET_EINVAL;
6796         }
6797 #endif
6798 
6799         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6800         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6801             return -TARGET_EINVAL;
6802         }
6803 
6804         if (block_signals()) {
6805             return -QEMU_ERESTARTSYS;
6806         }
6807 
6808         fork_start();
6809         ret = fork();
6810         if (ret == 0) {
6811             /* Child Process.  */
6812             cpu_clone_regs_child(env, newsp, flags);
6813             fork_end(1);
6814             /* There is a race condition here.  The parent process could
6815                theoretically read the TID in the child process before the child
6816                tid is set.  This would require using either ptrace
6817                (not implemented) or having *_tidptr to point at a shared memory
6818                mapping.  We can't repeat the spinlock hack used above because
6819                the child process gets its own copy of the lock.  */
6820             if (flags & CLONE_CHILD_SETTID)
6821                 put_user_u32(sys_gettid(), child_tidptr);
6822             if (flags & CLONE_PARENT_SETTID)
6823                 put_user_u32(sys_gettid(), parent_tidptr);
6824             ts = (TaskState *)cpu->opaque;
6825             if (flags & CLONE_SETTLS)
6826                 cpu_set_tls (env, newtls);
6827             if (flags & CLONE_CHILD_CLEARTID)
6828                 ts->child_tidptr = child_tidptr;
6829         } else {
6830             cpu_clone_regs_parent(env, flags);
6831             if (flags & CLONE_PIDFD) {
6832                 int pid_fd = 0;
6833 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6834                 int pid_child = ret;
6835                 pid_fd = pidfd_open(pid_child, 0);
6836                 if (pid_fd >= 0) {
6837                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6838                                                | FD_CLOEXEC);
6839                 } else {
6840                         pid_fd = 0;
6841                 }
6842 #endif
6843                 put_user_u32(pid_fd, parent_tidptr);
6844                 }
6845             fork_end(0);
6846         }
6847         g_assert(!cpu_in_exclusive_context(cpu));
6848     }
6849     return ret;
6850 }
6851 
/* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl(2) command to the host's constant.  Record
 * locking commands are mapped to their 64-bit variants so large offsets
 * work on 32-bit hosts.  Returns the host command, or -TARGET_EINVAL
 * for commands we do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    /* These have identical values on target and host. */
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* 32-bit ABIs have distinct explicit 64-bit lock commands. */
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6958 
/*
 * Shared table of flock l_type values that need translating between
 * target and host.  Each user defines TRANSTBL_CONVERT to select the
 * conversion direction before expanding this switch body, then
 * #undefs it again.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6965 
/*
 * Translate a guest flock l_type value to the host's value.
 * Returns -TARGET_EINVAL for values not in FLOCK_TRANSTBL.
 */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6973 
/*
 * Translate a host flock l_type value to the guest's value.
 * Unknown host values are deliberately passed through unchanged.
 */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6984 
6985 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6986                                             abi_ulong target_flock_addr)
6987 {
6988     struct target_flock *target_fl;
6989     int l_type;
6990 
6991     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6992         return -TARGET_EFAULT;
6993     }
6994 
6995     __get_user(l_type, &target_fl->l_type);
6996     l_type = target_to_host_flock(l_type);
6997     if (l_type < 0) {
6998         return l_type;
6999     }
7000     fl->l_type = l_type;
7001     __get_user(fl->l_whence, &target_fl->l_whence);
7002     __get_user(fl->l_start, &target_fl->l_start);
7003     __get_user(fl->l_len, &target_fl->l_len);
7004     __get_user(fl->l_pid, &target_fl->l_pid);
7005     unlock_user_struct(target_fl, target_flock_addr, 0);
7006     return 0;
7007 }
7008 
7009 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
7010                                           const struct flock64 *fl)
7011 {
7012     struct target_flock *target_fl;
7013     short l_type;
7014 
7015     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7016         return -TARGET_EFAULT;
7017     }
7018 
7019     l_type = host_to_target_flock(fl->l_type);
7020     __put_user(l_type, &target_fl->l_type);
7021     __put_user(fl->l_whence, &target_fl->l_whence);
7022     __put_user(fl->l_start, &target_fl->l_start);
7023     __put_user(fl->l_len, &target_fl->l_len);
7024     __put_user(fl->l_pid, &target_fl->l_pid);
7025     unlock_user_struct(target_fl, target_flock_addr, 1);
7026     return 0;
7027 }
7028 
7029 typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
7030 typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
7031 
7032 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * struct flock64 layout for old-ABI (OABI) Arm guests; QEMU_PACKED so
 * the host-side struct matches the guest's byte layout exactly.
 */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
7040 
7041 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
7042                                                    abi_ulong target_flock_addr)
7043 {
7044     struct target_oabi_flock64 *target_fl;
7045     int l_type;
7046 
7047     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7048         return -TARGET_EFAULT;
7049     }
7050 
7051     __get_user(l_type, &target_fl->l_type);
7052     l_type = target_to_host_flock(l_type);
7053     if (l_type < 0) {
7054         return l_type;
7055     }
7056     fl->l_type = l_type;
7057     __get_user(fl->l_whence, &target_fl->l_whence);
7058     __get_user(fl->l_start, &target_fl->l_start);
7059     __get_user(fl->l_len, &target_fl->l_len);
7060     __get_user(fl->l_pid, &target_fl->l_pid);
7061     unlock_user_struct(target_fl, target_flock_addr, 0);
7062     return 0;
7063 }
7064 
7065 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7066                                                  const struct flock64 *fl)
7067 {
7068     struct target_oabi_flock64 *target_fl;
7069     short l_type;
7070 
7071     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7072         return -TARGET_EFAULT;
7073     }
7074 
7075     l_type = host_to_target_flock(fl->l_type);
7076     __put_user(l_type, &target_fl->l_type);
7077     __put_user(fl->l_whence, &target_fl->l_whence);
7078     __put_user(fl->l_start, &target_fl->l_start);
7079     __put_user(fl->l_len, &target_fl->l_len);
7080     __put_user(fl->l_pid, &target_fl->l_pid);
7081     unlock_user_struct(target_fl, target_flock_addr, 1);
7082     return 0;
7083 }
7084 #endif
7085 
7086 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7087                                               abi_ulong target_flock_addr)
7088 {
7089     struct target_flock64 *target_fl;
7090     int l_type;
7091 
7092     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7093         return -TARGET_EFAULT;
7094     }
7095 
7096     __get_user(l_type, &target_fl->l_type);
7097     l_type = target_to_host_flock(l_type);
7098     if (l_type < 0) {
7099         return l_type;
7100     }
7101     fl->l_type = l_type;
7102     __get_user(fl->l_whence, &target_fl->l_whence);
7103     __get_user(fl->l_start, &target_fl->l_start);
7104     __get_user(fl->l_len, &target_fl->l_len);
7105     __get_user(fl->l_pid, &target_fl->l_pid);
7106     unlock_user_struct(target_fl, target_flock_addr, 0);
7107     return 0;
7108 }
7109 
7110 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7111                                             const struct flock64 *fl)
7112 {
7113     struct target_flock64 *target_fl;
7114     short l_type;
7115 
7116     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7117         return -TARGET_EFAULT;
7118     }
7119 
7120     l_type = host_to_target_flock(fl->l_type);
7121     __put_user(l_type, &target_fl->l_type);
7122     __put_user(fl->l_whence, &target_fl->l_whence);
7123     __put_user(fl->l_start, &target_fl->l_start);
7124     __put_user(fl->l_len, &target_fl->l_len);
7125     __put_user(fl->l_pid, &target_fl->l_pid);
7126     unlock_user_struct(target_fl, target_flock_addr, 1);
7127     return 0;
7128 }
7129 
/*
 * Implement fcntl(2) for the guest.  @cmd is the *target* command
 * number; it is translated to the host's value first, and struct
 * arguments (flock, f_owner_ex) are converted between guest and host
 * layouts around the host syscall.  Returns the host result or a
 * negative target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            /* The kernel filled in the conflicting-lock info; copy it back. */
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* 64-bit flock variants use the target_flock64 layout. */
    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Open flags differ between guest and host; translate the mask. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Signal numbers may differ between guest and host; translate them. */
    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    /* Plain integer argument: pass through unchanged. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* NOTE(review): this passes the *untranslated* target cmd to the
         * host, unlike every case above which uses host_cmd -- presumably
         * for commands where the numbers coincide; confirm. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7252 
7253 #ifdef USE_UID16
7254 
/* Narrow a 32-bit uid to the 16-bit ABI range; overflow maps to 65534. */
static inline int high2lowuid(int uid)
{
    return (uid > 65535) ? 65534 : uid;
}
7262 
/* Narrow a 32-bit gid to the 16-bit ABI range; overflow maps to 65534. */
static inline int high2lowgid(int gid)
{
    return (gid > 65535) ? 65534 : gid;
}
7270 
/* Widen a 16-bit uid; the 0xffff "no change" sentinel becomes -1. */
static inline int low2highuid(int uid)
{
    return ((int16_t)uid == -1) ? -1 : uid;
}
7278 
/* Widen a 16-bit gid; the 0xffff "no change" sentinel becomes -1. */
static inline int low2highgid(int gid)
{
    return ((int16_t)gid == -1) ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between host and target order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a uid/gid into guest memory (16-bit under USE_UID16). */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7292 
7293 #else /* !USE_UID16 */
/* 32-bit UID build: no narrowing needed, pass the uid through. */
static inline int high2lowuid(int uid)
{
    return uid;
}
/* 32-bit UID build: no narrowing needed, pass the gid through. */
static inline int high2lowgid(int gid)
{
    return gid;
}
/* 32-bit UID build: no widening needed, pass the uid through. */
static inline int low2highuid(int uid)
{
    return uid;
}
/* 32-bit UID build: no widening needed, pass the gid through. */
static inline int low2highgid(int gid)
{
    return gid;
}
/* Byte-swap a 32-bit uid/gid between host and target order. */
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a 32-bit uid/gid into guest memory. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7316 
7317 #endif /* USE_UID16 */
7318 
7319 /* We must do direct syscalls for setting UID/GID, because we want to
7320  * implement the Linux system call semantics of "change only for this thread",
7321  * not the libc/POSIX semantics of "change for all threads in process".
7322  * (See http://ewontfix.com/17/ for more details.)
7323  * We use the 32-bit version of the syscalls if present; if it is not
7324  * then either the host architecture supports 32-bit UIDs natively with
7325  * the standard syscall, or the 16-bit UID is the best we can do.
7326  */
/* Pick the 32-bit uid/gid syscall numbers when the host has them. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Direct per-thread syscall wrappers (see comment block above). */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7352 
/*
 * One-time initialisation of the syscall layer: register the thunk
 * struct descriptions from syscall_types.h and patch ioctl numbers
 * whose size field must be computed from the host struct layout.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* All-ones size field: compute the real size from the thunk
               description; this only makes sense for pointer arguments. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7397 
7398 #ifdef TARGET_NR_truncate64
7399 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7400                                          abi_long arg2,
7401                                          abi_long arg3,
7402                                          abi_long arg4)
7403 {
7404     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7405         arg2 = arg3;
7406         arg3 = arg4;
7407     }
7408     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7409 }
7410 #endif
7411 
7412 #ifdef TARGET_NR_ftruncate64
7413 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7414                                           abi_long arg2,
7415                                           abi_long arg3,
7416                                           abi_long arg4)
7417 {
7418     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7419         arg2 = arg3;
7420         arg3 = arg4;
7421     }
7422     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7423 }
7424 #endif
7425 
7426 #if defined(TARGET_NR_timer_settime) || \
7427     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7428 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7429                                                  abi_ulong target_addr)
7430 {
7431     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7432                                 offsetof(struct target_itimerspec,
7433                                          it_interval)) ||
7434         target_to_host_timespec(&host_its->it_value, target_addr +
7435                                 offsetof(struct target_itimerspec,
7436                                          it_value))) {
7437         return -TARGET_EFAULT;
7438     }
7439 
7440     return 0;
7441 }
7442 #endif
7443 
7444 #if defined(TARGET_NR_timer_settime64) || \
7445     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7446 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7447                                                    abi_ulong target_addr)
7448 {
7449     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7450                                   offsetof(struct target__kernel_itimerspec,
7451                                            it_interval)) ||
7452         target_to_host_timespec64(&host_its->it_value, target_addr +
7453                                   offsetof(struct target__kernel_itimerspec,
7454                                            it_value))) {
7455         return -TARGET_EFAULT;
7456     }
7457 
7458     return 0;
7459 }
7460 #endif
7461 
7462 #if ((defined(TARGET_NR_timerfd_gettime) || \
7463       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7464       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7465 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7466                                                  struct itimerspec *host_its)
7467 {
7468     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7469                                                        it_interval),
7470                                 &host_its->it_interval) ||
7471         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7472                                                        it_value),
7473                                 &host_its->it_value)) {
7474         return -TARGET_EFAULT;
7475     }
7476     return 0;
7477 }
7478 #endif
7479 
7480 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7481       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7482       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7483 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7484                                                    struct itimerspec *host_its)
7485 {
7486     if (host_to_target_timespec64(target_addr +
7487                                   offsetof(struct target__kernel_itimerspec,
7488                                            it_interval),
7489                                   &host_its->it_interval) ||
7490         host_to_target_timespec64(target_addr +
7491                                   offsetof(struct target__kernel_itimerspec,
7492                                            it_value),
7493                                   &host_its->it_value)) {
7494         return -TARGET_EFAULT;
7495     }
7496     return 0;
7497 }
7498 #endif
7499 
7500 #if defined(TARGET_NR_adjtimex) || \
7501     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a struct target_timex from guest memory at @target_addr into the
 * host *@host_tx, converting each field individually (including the
 * embedded timeval).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7536 
/*
 * Write the host *@host_tx as a struct target_timex to guest memory at
 * @target_addr, converting each field individually (including the
 * embedded timeval).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7571 #endif
7572 
7573 
7574 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a struct target__kernel_timex from guest memory at @target_addr
 * into the host *@host_tx.  The time member uses a 64-bit layout and is
 * converted separately via copy_from_user_timeval64(); the remaining
 * fields are converted one by one.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7613 
/*
 * Write the host *@host_tx as a struct target__kernel_timex to guest
 * memory at @target_addr.  The time member uses a 64-bit layout and is
 * converted separately via copy_to_user_timeval64(); the remaining
 * fields are converted one by one.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7652 #endif
7653 
/* Provide sigev_notify_thread_id when the libc headers don't define it;
 * the value then lives in the _sigev_un._tid union member. */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif
7657 
/*
 * Read a struct target_sigevent from guest memory at @target_addr and
 * convert it into the host *@host_sevp.  Only the fields below are
 * written; callers are presumably expected to have zeroed *host_sevp
 * beforehand -- confirm at call sites.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    /* The guest struct has no sigev_notify_thread_id alias; read the
     * union member directly. */
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): final argument 1 requests a copy-back even though the
     * struct was locked VERIFY_READ and never modified; 0 would appear
     * sufficient -- confirm intent. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7683 
7684 #if defined(TARGET_NR_mlockall)
7685 static inline int target_to_host_mlockall_arg(int arg)
7686 {
7687     int result = 0;
7688 
7689     if (arg & TARGET_MCL_CURRENT) {
7690         result |= MCL_CURRENT;
7691     }
7692     if (arg & TARGET_MCL_FUTURE) {
7693         result |= MCL_FUTURE;
7694     }
7695 #ifdef MCL_ONFAULT
7696     if (arg & TARGET_MCL_ONFAULT) {
7697         result |= MCL_ONFAULT;
7698     }
7699 #endif
7700 
7701     return result;
7702 }
7703 #endif
7704 
7705 static inline int target_to_host_msync_arg(abi_long arg)
7706 {
7707     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7708            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7709            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7710            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7711 }
7712 
7713 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7714      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7715      defined(TARGET_NR_newfstatat))
/*
 * Write the host *@host_st as the guest's 64-bit stat structure to
 * guest memory at @target_addr.  On 32-bit Arm the EABI and OABI
 * layouts differ, so the EABI variant is handled specially; all other
 * targets use target_stat64 (or target_stat when no 64-bit layout is
 * defined).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields don't leak host data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding and unset fields don't leak host data. */
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7788 #endif
7789 
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result out to guest memory at target_addr, byte-swapping
 * every field for the target.  Note that host_stx is already a
 * struct target_statx (filled in host byte order by the caller), not a
 * host struct statx.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * locked for writing.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so padding and unset spare fields don't leak host data. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
#endif
7830 
/*
 * Invoke the raw host futex syscall, picking between the __NR_futex and
 * __NR_futex_time64 variants.  On a 64-bit host only __NR_futex exists
 * and it already takes a 64-bit time_t; on a 32-bit host we use
 * __NR_futex_time64 when libc's timespec has a 64-bit tv_sec, otherwise
 * the legacy 32-bit __NR_futex.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Only reached when no futex syscall variant exists on this host. */
    g_assert_not_reached();
}
7855 
/*
 * Like do_sys_futex() but via the safe_syscall wrappers (so a pending
 * guest signal interrupts the futex cleanly), with the host result
 * converted to a -TARGET_* errno by get_errno().  Returns
 * -TARGET_ENOSYS when the host has no usable futex syscall variant.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7880 
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Emulate the guest futex/futex_time64 syscall.
 *
 * @time64 selects the 64-bit vs 32-bit guest timespec layout when
 * converting @timeout; @uaddr and @uaddr2 are guest addresses.
 * Per-op fixups are applied before handing off to do_safe_futex():
 * values that the kernel compares against guest memory are swapped to
 * guest byte order, and ops that don't take a timeout have it zeroed
 * so stale pointers are never converted.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* VAL is compared against guest memory, so swap to guest order. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        /* VAL3 is compared against guest memory for these ops. */
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        /* Convert the guest timespec (32- or 64-bit layout) in place. */
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
7953 
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).  The guest supplies a struct
 * file_handle at @handle whose leading handle_bytes field gives the
 * payload capacity; we run the host syscall into a scratch handle, then
 * copy the result back with handle_bytes/handle_type byte-swapped.  The
 * opaque f_handle payload is copied verbatim (see the note below).
 * The resulting mount id is stored at guest address @mount_id.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the leading 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): size is guest-controlled; an oversized value makes
       lock_user() below fail with EFAULT rather than overflowing. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
8007 
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2).  Duplicates the guest file_handle,
 * converts its handle_bytes/handle_type to host byte order (the opaque
 * payload is used as-is), translates the open flags, and returns the
 * host fd or -TARGET_* errno.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the leading 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
8041 
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Emulate signalfd/signalfd4.  Converts the guest sigset and flags to
 * host form, creates the signalfd, and registers a translator so that
 * data read back from the fd is converted for the guest.  Rejects any
 * flag outside O_NONBLOCK/O_CLOEXEC with -TARGET_EINVAL, matching the
 * kernel's SFD_* flag set.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
8072 
/* Map host to target signal numbers for the wait family of syscalls.
   Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    if (WIFSIGNALED(status)) {
        /* Low 7 bits hold the terminating signal; translate just those. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        return (status & ~0x7f) | tsig;
    }
    if (WIFSTOPPED(status)) {
        /* Stop signal lives in bits 8-15; keep the 0x7f marker below. */
        int tsig = host_to_target_signal(WSTOPSIG(status));
        return (tsig << 8) | (status & 0xff);
    }
    return status;
}
8086 
8087 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8088 {
8089     CPUState *cpu = env_cpu(cpu_env);
8090     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8091     int i;
8092 
8093     for (i = 0; i < bprm->argc; i++) {
8094         size_t len = strlen(bprm->argv[i]) + 1;
8095 
8096         if (write(fd, bprm->argv[i], len) != len) {
8097             return -1;
8098         }
8099     }
8100 
8101     return 0;
8102 }
8103 
8104 static void show_smaps(int fd, unsigned long size)
8105 {
8106     unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8107     unsigned long size_kb = size >> 10;
8108 
8109     dprintf(fd, "Size:                  %lu kB\n"
8110                 "KernelPageSize:        %lu kB\n"
8111                 "MMUPageSize:           %lu kB\n"
8112                 "Rss:                   0 kB\n"
8113                 "Pss:                   0 kB\n"
8114                 "Pss_Dirty:             0 kB\n"
8115                 "Shared_Clean:          0 kB\n"
8116                 "Shared_Dirty:          0 kB\n"
8117                 "Private_Clean:         0 kB\n"
8118                 "Private_Dirty:         0 kB\n"
8119                 "Referenced:            0 kB\n"
8120                 "Anonymous:             0 kB\n"
8121                 "LazyFree:              0 kB\n"
8122                 "AnonHugePages:         0 kB\n"
8123                 "ShmemPmdMapped:        0 kB\n"
8124                 "FilePmdMapped:         0 kB\n"
8125                 "Shared_Hugetlb:        0 kB\n"
8126                 "Private_Hugetlb:       0 kB\n"
8127                 "Swap:                  0 kB\n"
8128                 "SwapPss:               0 kB\n"
8129                 "Locked:                0 kB\n"
8130                 "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
8131 }
8132 
/*
 * Back /proc/self/maps (and, with @smaps, /proc/self/smaps) for the
 * guest: walk the host's own mappings, keep only the ranges that fall
 * inside guest address space, and print them with guest (h2g) addresses
 * and guest page permissions.  Always returns 0.
 */
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    IntervalTreeRoot *map_info = read_self_maps();
    IntervalTreeNode *s;
    int count;

    for (s = interval_tree_iter_first(map_info, 0, -1); s;
         s = interval_tree_iter_next(s, 0, -1)) {
        MapInfo *e = container_of(s, MapInfo, itree);

        if (h2g_valid(e->itree.start)) {
            unsigned long min = e->itree.start;
            unsigned long max = e->itree.last + 1;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last guest-representable address. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip host mappings that are not guest-visible pages. */
            if (!page_check_range(h2g(min), max - min, flags)) {
                continue;
            }

            /* The hppa stack grows upward, so its limit is the high end. */
#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad the path out to column 73, like the kernel does. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
8215 
/* /proc/self/maps: one-line-per-mapping form. */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8220 
/* /proc/self/smaps: maps plus the per-mapping detail block. */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8225 
8226 static int open_self_stat(CPUArchState *cpu_env, int fd)
8227 {
8228     CPUState *cpu = env_cpu(cpu_env);
8229     TaskState *ts = cpu->opaque;
8230     g_autoptr(GString) buf = g_string_new(NULL);
8231     int i;
8232 
8233     for (i = 0; i < 44; i++) {
8234         if (i == 0) {
8235             /* pid */
8236             g_string_printf(buf, FMT_pid " ", getpid());
8237         } else if (i == 1) {
8238             /* app name */
8239             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8240             bin = bin ? bin + 1 : ts->bprm->argv[0];
8241             g_string_printf(buf, "(%.15s) ", bin);
8242         } else if (i == 2) {
8243             /* task state */
8244             g_string_assign(buf, "R "); /* we are running right now */
8245         } else if (i == 3) {
8246             /* ppid */
8247             g_string_printf(buf, FMT_pid " ", getppid());
8248         } else if (i == 21) {
8249             /* starttime */
8250             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8251         } else if (i == 27) {
8252             /* stack bottom */
8253             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8254         } else {
8255             /* for the rest, there is MasterCard */
8256             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8257         }
8258 
8259         if (write(fd, buf->str, buf->len) != buf->len) {
8260             return -1;
8261         }
8262     }
8263 
8264     return 0;
8265 }
8266 
/*
 * Back /proc/self/auxv: copy the auxiliary vector that was placed on
 * the guest stack at exec time out to @fd.  Always returns 0; a write
 * failure simply truncates the output.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): len/ptr were advanced by the loop, so this
           unlocks with the unwritten remainder (0 on full success). */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8296 
/*
 * Return 1 if @filename names @entry inside this process's own /proc
 * directory, i.e. "/proc/self/<entry>" or "/proc/<our-pid>/<entry>";
 * return 0 for anything else (including other pids).
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* Numeric pid: only our own pid counts as "myself". */
        char pid_prefix[80];
        size_t plen;

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        plen = strlen(pid_prefix);
        if (strncmp(p, pid_prefix, plen) != 0) {
            return 0;
        }
        p += plen;
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
8320 
/*
 * Write a crash report to @logfile: the formatted message (fmt/code),
 * the executable path, the CPU register state, and a /proc-style maps
 * listing.  A NULL @logfile is tolerated and ignored.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                      const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
8333 
/*
 * Report a fatal guest exception to stderr and, when a separate log
 * file is configured, to that log as well.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        /* excp_dump_file copes with a NULL logfile from a failed trylock. */
        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8347 
8348 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8349     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8350     defined(TARGET_RISCV) || defined(TARGET_S390X)
/* Exact-path match helper for the fake /proc entries table. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8355 #endif
8356 
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Back /proc/net/route for cross-endian guests: re-emit the host's
 * routing table with the address columns (dest, gateway, mask)
 * byte-swapped into guest order.  Returns 0 on success, -1 if the host
 * file cannot be opened.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;
    ssize_t read;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /* read header */

    /* NOTE(review): the header getline result is not checked; if it
       failed, line would still be NULL here — confirm harmless. */
    read = getline(&line, &len, fp);
    dprintf(fd, "%s", line);

    /* read routes */

    while ((read = getline(&line, &len, fp)) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        fields = sscanf(line,
                        "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            continue;
        }
        /* Addresses are stored in host byte order in the host file. */
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
8401 
#if defined(TARGET_SPARC)
/* Minimal /proc/cpuinfo for sparc guests: just the machine type. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
8409 
#if defined(TARGET_HPPA)
/*
 * /proc/cpuinfo for hppa guests: one fixed PA7300LC entry per online
 * host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i, num_cpus;

    num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
        dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
        dprintf(fd, "capabilities\t: os32\n");
        dprintf(fd, "model\t\t: 9000/778/B160L - "
                    "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
    }
    return 0;
}
#endif
8427 
#if defined(TARGET_RISCV)
/*
 * /proc/cpuinfo for riscv guests: one entry per online host CPU,
 * reporting the emulated CPU's ISA string and MMU mode.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    if (cfg->mmu) {
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    /* riscv_isa_string() allocates; we own the returned buffer. */
    g_free(isa_string);
    return 0;
}
#endif
8456 
8457 #if defined(TARGET_S390X)
8458 /*
8459  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8460  * show in /proc/cpuinfo.
8461  *
8462  * Skip the following in order to match the missing support in op_ecag():
8463  * - show_cacheinfo().
8464  * - show_cpu_topology().
8465  * - show_cpu_mhz().
8466  *
8467  * Use fixed values for certain fields:
8468  * - bogomips per cpu - from a qemu-system-s390x run.
8469  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8470  *
8471  * Keep the code structure close to arch/s390/kernel/processor.c.
8472  */
8473 
/*
 * Print the "facilities" line: every bit set in the emulated CPU's
 * STFL(E) facility block, as a list of decimal bit numbers.
 */
static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}
8489 
/* Synthesize a CPU identification number with the CPU address @n in
   the low physical-address bits. */
static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}
8495 
/*
 * Print the leading /proc/cpuinfo summary: vendor, cpu count, fixed
 * bogomips, hwcap feature names, facility bits, and one
 * version/identification/machine line per online host CPU.
 */
static void show_cpu_summary(CPUArchState *cpu_env, int fd)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    uint32_t elf_hwcap = get_elf_hwcap();
    const char *hwcap_str;
    int i;

    dprintf(fd, "vendor_id       : IBM/S390\n"
                "# processors    : %i\n"
                "bogomips per cpu: 13370.00\n",
            num_cpus);
    dprintf(fd, "max thread id   : 0\n");
    dprintf(fd, "features\t: ");
    /* List the name of every set hwcap bit that has one. */
    for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
        if (!(elf_hwcap & (1 << i))) {
            continue;
        }
        hwcap_str = elf_hwcap_str(i);
        if (hwcap_str) {
            dprintf(fd, "%s ", hwcap_str);
        }
    }
    dprintf(fd, "\n");
    show_facilities(fd);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor %d: "
               "version = %02X,  "
               "identification = %06X,  "
               "machine = %04X\n",
               i, model->cpu_ver, cpu_ident(i), model->def->type);
    }
}
8529 
/* Print the per-CPU version/identification/machine lines for CPU @n. */
static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}
8538 
/* Print the per-CPU section ("cpu number" header plus ids) for CPU @n. */
static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}
8544 
/* /proc/cpuinfo for s390x guests: summary block then one section per
   online host CPU. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
8556 #endif
8557 
#if defined(TARGET_M68K)
/* Minimal /proc/hardware for m68k guests: just the model name. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
8565 
/*
 * openat(2) on behalf of the guest.  Paths the guest must not see the
 * host's version of (/proc/self/* and selected /proc files) are
 * intercepted: their content is generated into an anonymous memfd (or a
 * TMPDIR temp file as fallback) and that fd is returned instead.  All
 * other paths go to the host, through the QEMU sysroot remapping done
 * by path().  @safe selects the signal-safe syscall wrapper.
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    /* One faked file: its name, content generator, and path matcher. */
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/self/exe opens QEMU's record of the guest executable. */
    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            /* Unlink immediately; the fd keeps the file alive. */
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            /* Preserve the fill errno across close(). */
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated content from the top. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
8647 
8648 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8649 {
8650     ssize_t ret;
8651 
8652     if (!pathname || !buf) {
8653         errno = EFAULT;
8654         return -1;
8655     }
8656 
8657     if (!bufsiz) {
8658         /* Short circuit this for the magic exe check. */
8659         errno = EINVAL;
8660         return -1;
8661     }
8662 
8663     if (is_proc_myself((const char *)pathname, "exe")) {
8664         /*
8665          * Don't worry about sign mismatch as earlier mapping
8666          * logic would have thrown a bad address error.
8667          */
8668         ret = MIN(strlen(exec_path), bufsiz);
8669         /* We cannot NUL terminate the string. */
8670         memcpy(buf, exec_path, ret);
8671     } else {
8672         ret = readlink(path(pathname), buf, bufsiz);
8673     }
8674 
8675     return ret;
8676 }
8677 
/*
 * Common implementation of execve(2) and execveat(2).
 *
 * pathname, guest_argp and guest_envp are guest addresses; guest_argp
 * and guest_envp point to NULL-terminated arrays of guest string
 * pointers.  The strings are locked into host memory, the host
 * safe_execve/safe_execveat is invoked, and on failure everything is
 * unlocked and freed again.  Returns 0 only in the pathological case
 * where the exec "succeeds" without replacing the process; otherwise a
 * negative TARGET_* errno.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* First pass: count argv entries up to the NULL terminator. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Same for envp. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    /* Host-side pointer arrays, zero-filled so cleanup can stop at NULL. */
    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Second pass: lock each guest argv string into host memory. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /* ...and each guest envp string. */
    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* Exec'ing /proc/self/exe means exec'ing the emulated binary. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /*
     * Unlock every string we managed to lock.  The guest arrays are
     * re-walked to recover each string's guest address; the host-side
     * arrays are NULL-terminated, so *q bounds the walk.
     */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8793 
8794 #define TIMER_MAGIC 0x0caf0000
8795 #define TIMER_MAGIC_MASK 0xffff0000
8796 
8797 /* Convert QEMU provided timer ID back to internal 16bit index format */
8798 static target_timer_t get_timer_id(abi_long arg)
8799 {
8800     target_timer_t timerid = arg;
8801 
8802     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8803         return -TARGET_EINVAL;
8804     }
8805 
8806     timerid &= 0xffff;
8807 
8808     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8809         return -TARGET_EINVAL;
8810     }
8811 
8812     return timerid;
8813 }
8814 
8815 static int target_to_host_cpu_mask(unsigned long *host_mask,
8816                                    size_t host_size,
8817                                    abi_ulong target_addr,
8818                                    size_t target_size)
8819 {
8820     unsigned target_bits = sizeof(abi_ulong) * 8;
8821     unsigned host_bits = sizeof(*host_mask) * 8;
8822     abi_ulong *target_mask;
8823     unsigned i, j;
8824 
8825     assert(host_size >= target_size);
8826 
8827     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8828     if (!target_mask) {
8829         return -TARGET_EFAULT;
8830     }
8831     memset(host_mask, 0, host_size);
8832 
8833     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8834         unsigned bit = i * target_bits;
8835         abi_ulong val;
8836 
8837         __get_user(val, &target_mask[i]);
8838         for (j = 0; j < target_bits; j++, bit++) {
8839             if (val & (1UL << j)) {
8840                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8841             }
8842         }
8843     }
8844 
8845     unlock_user(target_mask, target_addr, 0);
8846     return 0;
8847 }
8848 
8849 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8850                                    size_t host_size,
8851                                    abi_ulong target_addr,
8852                                    size_t target_size)
8853 {
8854     unsigned target_bits = sizeof(abi_ulong) * 8;
8855     unsigned host_bits = sizeof(*host_mask) * 8;
8856     abi_ulong *target_mask;
8857     unsigned i, j;
8858 
8859     assert(host_size >= target_size);
8860 
8861     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8862     if (!target_mask) {
8863         return -TARGET_EFAULT;
8864     }
8865 
8866     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8867         unsigned bit = i * target_bits;
8868         abi_ulong val = 0;
8869 
8870         for (j = 0; j < target_bits; j++, bit++) {
8871             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8872                 val |= 1UL << j;
8873             }
8874         }
8875         __put_user(val, &target_mask[i]);
8876     }
8877 
8878     unlock_user(target_mask, target_addr, target_size);
8879     return 0;
8880 }
8881 
#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): read host directory entries into a scratch
 * buffer, then repack them one record at a time into the guest's
 * struct target_dirent layout at guest address arg2 (capacity: count
 * bytes).  Returns the number of bytes written to the guest buffer,
 * or a negative TARGET_* errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and guest records (toff) in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: the name's NUL plus the trailing type byte (see below). */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
8968 
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): read host linux_dirent64 records into a
 * scratch buffer, then repack them into the guest's
 * struct target_dirent64 layout at guest address arg2 (capacity:
 * count bytes).  Returns the number of bytes written to the guest
 * buffer, or a negative TARGET_* errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records (hoff) and guest records (toff) in lockstep. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the trailing NUL here. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
9034 
9035 #if defined(TARGET_NR_riscv_hwprobe)
9036 
9037 #define RISCV_HWPROBE_KEY_MVENDORID     0
9038 #define RISCV_HWPROBE_KEY_MARCHID       1
9039 #define RISCV_HWPROBE_KEY_MIMPID        2
9040 
9041 #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
9042 #define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
9043 
9044 #define RISCV_HWPROBE_KEY_IMA_EXT_0     4
9045 #define     RISCV_HWPROBE_IMA_FD       (1 << 0)
9046 #define     RISCV_HWPROBE_IMA_C        (1 << 1)
9047 
9048 #define RISCV_HWPROBE_KEY_CPUPERF_0     5
9049 #define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
9050 #define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
9051 #define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
9052 #define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
9053 #define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
9054 #define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)
9055 
/*
 * One key/value probe pair, laid out with guest ABI types so it can
 * be accessed in place in locked guest memory (see do_riscv_hwprobe).
 */
struct riscv_hwprobe {
    abi_llong  key;       /* probe key; set to -1 for unrecognised keys */
    abi_ullong value;     /* probe result, 0 by default */
};
9060 
9061 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9062                                     struct riscv_hwprobe *pair,
9063                                     size_t pair_count)
9064 {
9065     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9066 
9067     for (; pair_count > 0; pair_count--, pair++) {
9068         abi_llong key;
9069         abi_ullong value;
9070         __put_user(0, &pair->value);
9071         __get_user(key, &pair->key);
9072         switch (key) {
9073         case RISCV_HWPROBE_KEY_MVENDORID:
9074             __put_user(cfg->mvendorid, &pair->value);
9075             break;
9076         case RISCV_HWPROBE_KEY_MARCHID:
9077             __put_user(cfg->marchid, &pair->value);
9078             break;
9079         case RISCV_HWPROBE_KEY_MIMPID:
9080             __put_user(cfg->mimpid, &pair->value);
9081             break;
9082         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9083             value = riscv_has_ext(env, RVI) &&
9084                     riscv_has_ext(env, RVM) &&
9085                     riscv_has_ext(env, RVA) ?
9086                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9087             __put_user(value, &pair->value);
9088             break;
9089         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9090             value = riscv_has_ext(env, RVF) &&
9091                     riscv_has_ext(env, RVD) ?
9092                     RISCV_HWPROBE_IMA_FD : 0;
9093             value |= riscv_has_ext(env, RVC) ?
9094                      RISCV_HWPROBE_IMA_C : pair->value;
9095             __put_user(value, &pair->value);
9096             break;
9097         case RISCV_HWPROBE_KEY_CPUPERF_0:
9098             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9099             break;
9100         default:
9101             __put_user(-1, &pair->key);
9102             break;
9103         }
9104     }
9105 }
9106 
9107 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9108 {
9109     int ret, i, tmp;
9110     size_t host_mask_size, target_mask_size;
9111     unsigned long *host_mask;
9112 
9113     /*
9114      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9115      * arg3 contains the cpu count.
9116      */
9117     tmp = (8 * sizeof(abi_ulong));
9118     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9119     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9120                      ~(sizeof(*host_mask) - 1);
9121 
9122     host_mask = alloca(host_mask_size);
9123 
9124     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9125                                   arg4, target_mask_size);
9126     if (ret != 0) {
9127         return ret;
9128     }
9129 
9130     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9131         if (host_mask[i] != 0) {
9132             return 0;
9133         }
9134     }
9135     return -TARGET_EINVAL;
9136 }
9137 
9138 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9139                                  abi_long arg2, abi_long arg3,
9140                                  abi_long arg4, abi_long arg5)
9141 {
9142     int ret;
9143     struct riscv_hwprobe *host_pairs;
9144 
9145     /* flags must be 0 */
9146     if (arg5 != 0) {
9147         return -TARGET_EINVAL;
9148     }
9149 
9150     /* check cpu_set */
9151     if (arg3 != 0) {
9152         ret = cpu_set_valid(arg3, arg4);
9153         if (ret != 0) {
9154             return ret;
9155         }
9156     } else if (arg4 != 0) {
9157         return -TARGET_EINVAL;
9158     }
9159 
9160     /* no pairs */
9161     if (arg2 == 0) {
9162         return 0;
9163     }
9164 
9165     host_pairs = lock_user(VERIFY_WRITE, arg1,
9166                            sizeof(*host_pairs) * (size_t)arg2, 0);
9167     if (host_pairs == NULL) {
9168         return -TARGET_EFAULT;
9169     }
9170     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9171     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9172     return 0;
9173 }
9174 #endif /* TARGET_NR_riscv_hwprobe */
9175 
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Host syscall wrapper generated by _syscall2: pivot_root(2). */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
/* Host syscall wrapper generated by _syscall3: open_tree(2). */
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
/* Host syscall wrapper generated by _syscall5: move_mount(2). */
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
           int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
9191 
9192 /* This is an internal helper for do_syscall so that it is easier
9193  * to have a single return point, so that actions, such as logging
9194  * of syscall results, can be performed.
9195  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9196  */
9197 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9198                             abi_long arg2, abi_long arg3, abi_long arg4,
9199                             abi_long arg5, abi_long arg6, abi_long arg7,
9200                             abi_long arg8)
9201 {
9202     CPUState *cpu = env_cpu(cpu_env);
9203     abi_long ret;
9204 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9205     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9206     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9207     || defined(TARGET_NR_statx)
9208     struct stat st;
9209 #endif
9210 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9211     || defined(TARGET_NR_fstatfs)
9212     struct statfs stfs;
9213 #endif
9214     void *p;
9215 
9216     switch(num) {
9217     case TARGET_NR_exit:
9218         /* In old applications this may be used to implement _exit(2).
9219            However in threaded applications it is used for thread termination,
9220            and _exit_group is used for application termination.
9221            Do thread termination if we have more then one thread.  */
9222 
9223         if (block_signals()) {
9224             return -QEMU_ERESTARTSYS;
9225         }
9226 
9227         pthread_mutex_lock(&clone_lock);
9228 
9229         if (CPU_NEXT(first_cpu)) {
9230             TaskState *ts = cpu->opaque;
9231 
9232             if (ts->child_tidptr) {
9233                 put_user_u32(0, ts->child_tidptr);
9234                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9235                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9236             }
9237 
9238             object_unparent(OBJECT(cpu));
9239             object_unref(OBJECT(cpu));
9240             /*
9241              * At this point the CPU should be unrealized and removed
9242              * from cpu lists. We can clean-up the rest of the thread
9243              * data without the lock held.
9244              */
9245 
9246             pthread_mutex_unlock(&clone_lock);
9247 
9248             thread_cpu = NULL;
9249             g_free(ts);
9250             rcu_unregister_thread();
9251             pthread_exit(NULL);
9252         }
9253 
9254         pthread_mutex_unlock(&clone_lock);
9255         preexit_cleanup(cpu_env, arg1);
9256         _exit(arg1);
9257         return 0; /* avoid warning */
9258     case TARGET_NR_read:
9259         if (arg2 == 0 && arg3 == 0) {
9260             return get_errno(safe_read(arg1, 0, 0));
9261         } else {
9262             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9263                 return -TARGET_EFAULT;
9264             ret = get_errno(safe_read(arg1, p, arg3));
9265             if (ret >= 0 &&
9266                 fd_trans_host_to_target_data(arg1)) {
9267                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9268             }
9269             unlock_user(p, arg2, ret);
9270         }
9271         return ret;
9272     case TARGET_NR_write:
9273         if (arg2 == 0 && arg3 == 0) {
9274             return get_errno(safe_write(arg1, 0, 0));
9275         }
9276         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9277             return -TARGET_EFAULT;
9278         if (fd_trans_target_to_host_data(arg1)) {
9279             void *copy = g_malloc(arg3);
9280             memcpy(copy, p, arg3);
9281             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9282             if (ret >= 0) {
9283                 ret = get_errno(safe_write(arg1, copy, ret));
9284             }
9285             g_free(copy);
9286         } else {
9287             ret = get_errno(safe_write(arg1, p, arg3));
9288         }
9289         unlock_user(p, arg2, 0);
9290         return ret;
9291 
9292 #ifdef TARGET_NR_open
9293     case TARGET_NR_open:
9294         if (!(p = lock_user_string(arg1)))
9295             return -TARGET_EFAULT;
9296         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9297                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9298                                   arg3, true));
9299         fd_trans_unregister(ret);
9300         unlock_user(p, arg1, 0);
9301         return ret;
9302 #endif
9303     case TARGET_NR_openat:
9304         if (!(p = lock_user_string(arg2)))
9305             return -TARGET_EFAULT;
9306         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9307                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9308                                   arg4, true));
9309         fd_trans_unregister(ret);
9310         unlock_user(p, arg2, 0);
9311         return ret;
9312 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9313     case TARGET_NR_name_to_handle_at:
9314         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9315         return ret;
9316 #endif
9317 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9318     case TARGET_NR_open_by_handle_at:
9319         ret = do_open_by_handle_at(arg1, arg2, arg3);
9320         fd_trans_unregister(ret);
9321         return ret;
9322 #endif
9323 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9324     case TARGET_NR_pidfd_open:
9325         return get_errno(pidfd_open(arg1, arg2));
9326 #endif
9327 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9328     case TARGET_NR_pidfd_send_signal:
9329         {
9330             siginfo_t uinfo, *puinfo;
9331 
9332             if (arg3) {
9333                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9334                 if (!p) {
9335                     return -TARGET_EFAULT;
9336                  }
9337                  target_to_host_siginfo(&uinfo, p);
9338                  unlock_user(p, arg3, 0);
9339                  puinfo = &uinfo;
9340             } else {
9341                  puinfo = NULL;
9342             }
9343             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9344                                               puinfo, arg4));
9345         }
9346         return ret;
9347 #endif
9348 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9349     case TARGET_NR_pidfd_getfd:
9350         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9351 #endif
9352     case TARGET_NR_close:
9353         fd_trans_unregister(arg1);
9354         return get_errno(close(arg1));
9355 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9356     case TARGET_NR_close_range:
9357         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9358         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9359             abi_long fd, maxfd;
9360             maxfd = MIN(arg2, target_fd_max);
9361             for (fd = arg1; fd < maxfd; fd++) {
9362                 fd_trans_unregister(fd);
9363             }
9364         }
9365         return ret;
9366 #endif
9367 
9368     case TARGET_NR_brk:
9369         return do_brk(arg1);
9370 #ifdef TARGET_NR_fork
9371     case TARGET_NR_fork:
9372         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9373 #endif
9374 #ifdef TARGET_NR_waitpid
9375     case TARGET_NR_waitpid:
9376         {
9377             int status;
9378             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9379             if (!is_error(ret) && arg2 && ret
9380                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9381                 return -TARGET_EFAULT;
9382         }
9383         return ret;
9384 #endif
9385 #ifdef TARGET_NR_waitid
9386     case TARGET_NR_waitid:
9387         {
9388             siginfo_t info;
9389             info.si_pid = 0;
9390             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9391             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9392                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9393                     return -TARGET_EFAULT;
9394                 host_to_target_siginfo(p, &info);
9395                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9396             }
9397         }
9398         return ret;
9399 #endif
9400 #ifdef TARGET_NR_creat /* not on alpha */
9401     case TARGET_NR_creat:
9402         if (!(p = lock_user_string(arg1)))
9403             return -TARGET_EFAULT;
9404         ret = get_errno(creat(p, arg2));
9405         fd_trans_unregister(ret);
9406         unlock_user(p, arg1, 0);
9407         return ret;
9408 #endif
9409 #ifdef TARGET_NR_link
9410     case TARGET_NR_link:
9411         {
9412             void * p2;
9413             p = lock_user_string(arg1);
9414             p2 = lock_user_string(arg2);
9415             if (!p || !p2)
9416                 ret = -TARGET_EFAULT;
9417             else
9418                 ret = get_errno(link(p, p2));
9419             unlock_user(p2, arg2, 0);
9420             unlock_user(p, arg1, 0);
9421         }
9422         return ret;
9423 #endif
9424 #if defined(TARGET_NR_linkat)
9425     case TARGET_NR_linkat:
9426         {
9427             void * p2 = NULL;
9428             if (!arg2 || !arg4)
9429                 return -TARGET_EFAULT;
9430             p  = lock_user_string(arg2);
9431             p2 = lock_user_string(arg4);
9432             if (!p || !p2)
9433                 ret = -TARGET_EFAULT;
9434             else
9435                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9436             unlock_user(p, arg2, 0);
9437             unlock_user(p2, arg4, 0);
9438         }
9439         return ret;
9440 #endif
9441 #ifdef TARGET_NR_unlink
9442     case TARGET_NR_unlink:
9443         if (!(p = lock_user_string(arg1)))
9444             return -TARGET_EFAULT;
9445         ret = get_errno(unlink(p));
9446         unlock_user(p, arg1, 0);
9447         return ret;
9448 #endif
9449 #if defined(TARGET_NR_unlinkat)
9450     case TARGET_NR_unlinkat:
9451         if (!(p = lock_user_string(arg2)))
9452             return -TARGET_EFAULT;
9453         ret = get_errno(unlinkat(arg1, p, arg3));
9454         unlock_user(p, arg2, 0);
9455         return ret;
9456 #endif
9457     case TARGET_NR_execveat:
9458         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9459     case TARGET_NR_execve:
9460         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9461     case TARGET_NR_chdir:
9462         if (!(p = lock_user_string(arg1)))
9463             return -TARGET_EFAULT;
9464         ret = get_errno(chdir(p));
9465         unlock_user(p, arg1, 0);
9466         return ret;
9467 #ifdef TARGET_NR_time
9468     case TARGET_NR_time:
9469         {
9470             time_t host_time;
9471             ret = get_errno(time(&host_time));
9472             if (!is_error(ret)
9473                 && arg1
9474                 && put_user_sal(host_time, arg1))
9475                 return -TARGET_EFAULT;
9476         }
9477         return ret;
9478 #endif
9479 #ifdef TARGET_NR_mknod
9480     case TARGET_NR_mknod:
9481         if (!(p = lock_user_string(arg1)))
9482             return -TARGET_EFAULT;
9483         ret = get_errno(mknod(p, arg2, arg3));
9484         unlock_user(p, arg1, 0);
9485         return ret;
9486 #endif
9487 #if defined(TARGET_NR_mknodat)
9488     case TARGET_NR_mknodat:
9489         if (!(p = lock_user_string(arg2)))
9490             return -TARGET_EFAULT;
9491         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9492         unlock_user(p, arg2, 0);
9493         return ret;
9494 #endif
9495 #ifdef TARGET_NR_chmod
9496     case TARGET_NR_chmod:
9497         if (!(p = lock_user_string(arg1)))
9498             return -TARGET_EFAULT;
9499         ret = get_errno(chmod(p, arg2));
9500         unlock_user(p, arg1, 0);
9501         return ret;
9502 #endif
9503 #ifdef TARGET_NR_lseek
9504     case TARGET_NR_lseek:
9505         return get_errno(lseek(arg1, arg2, arg3));
9506 #endif
9507 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9508     /* Alpha specific */
9509     case TARGET_NR_getxpid:
9510         cpu_env->ir[IR_A4] = getppid();
9511         return get_errno(getpid());
9512 #endif
9513 #ifdef TARGET_NR_getpid
9514     case TARGET_NR_getpid:
9515         return get_errno(getpid());
9516 #endif
9517     case TARGET_NR_mount:
9518         {
9519             /* need to look at the data field */
9520             void *p2, *p3;
9521 
9522             if (arg1) {
9523                 p = lock_user_string(arg1);
9524                 if (!p) {
9525                     return -TARGET_EFAULT;
9526                 }
9527             } else {
9528                 p = NULL;
9529             }
9530 
9531             p2 = lock_user_string(arg2);
9532             if (!p2) {
9533                 if (arg1) {
9534                     unlock_user(p, arg1, 0);
9535                 }
9536                 return -TARGET_EFAULT;
9537             }
9538 
9539             if (arg3) {
9540                 p3 = lock_user_string(arg3);
9541                 if (!p3) {
9542                     if (arg1) {
9543                         unlock_user(p, arg1, 0);
9544                     }
9545                     unlock_user(p2, arg2, 0);
9546                     return -TARGET_EFAULT;
9547                 }
9548             } else {
9549                 p3 = NULL;
9550             }
9551 
9552             /* FIXME - arg5 should be locked, but it isn't clear how to
9553              * do that since it's not guaranteed to be a NULL-terminated
9554              * string.
9555              */
9556             if (!arg5) {
9557                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9558             } else {
9559                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9560             }
9561             ret = get_errno(ret);
9562 
9563             if (arg1) {
9564                 unlock_user(p, arg1, 0);
9565             }
9566             unlock_user(p2, arg2, 0);
9567             if (arg3) {
9568                 unlock_user(p3, arg3, 0);
9569             }
9570         }
9571         return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        /* umount(target): lock the guest path and call the host umount(). */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            void *p2, *p4;

            /* Both path arguments are required; reject NULL pointers. */
            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);

            return ret;
        }
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            /* Only O_CLOEXEC needs target->host translation here; the
             * remaining flag bits are passed through unchanged.
             */
            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /* Emulate stime() via clock_settime(CLOCK_REALTIME) with a
             * zero nanoseconds field.
             */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm(2) cannot fail; its return value (seconds remaining on any
         * previous alarm) is passed back directly.
         */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /* Suspend using the task's current signal mask; pause() always
         * completes with -EINTR once a signal has been delivered.
         */
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            /* utime(path, times): convert the optional target utimbuf
             * (byte-swapping each field) before calling the host utime().
             */
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                /* NULL times pointer: set the timestamps to "now". */
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            /* utimes(path, tv[2]): copy both timevals from guest memory. */
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            /* futimesat(dirfd, path, tv[2]): like utimes but relative to
             * the directory fd in arg1.
             */
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies any configured guest path prefix remapping. */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* faccessat() takes no flags argument from the guest here. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* faccessat2 forwards the guest-supplied flags (arg4). */
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) has no return value. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Translate the signal number to the host encoding first. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            /* Lock both paths before checking; unlock_user() copes with
             * a NULL pointer, so the single cleanup path below is safe.
             */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                /* arg5 carries the renameat2 flags through unchanged. */
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translation state to the new descriptor. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2 adds flags; translate them from target to host encoding. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            /* times(): fill the guest's target_tms with clock_t values
             * converted to the target's clock tick representation.
             */
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            /* The syscall's return value itself is also a clock_t. */
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        if (arg1 == 0) {
            /* NULL filename disables process accounting. */
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Propagate any fd translation state to the new descriptor. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* The only flag dup3(2) accepts is O_CLOEXEC (target encoding). */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            /* Return the *target* errno value, consistent with every
             * other error path in this file (was host -EINVAL, which is
             * wrong for targets whose EINVAL value differs).
             */
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Propagate any fd translation state to the new descriptor. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /* Old-style sigaction.  MIPS uses the full target_sigaction
             * layout with a 4-word mask; all other targets use the
             * smaller target_old_sigaction with a single-word mask.
             */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		/* Only the first mask word is meaningful for old sigaction. */
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The guest must pass exactly the target sigset size. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* 'act' was only read; release without copying back. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* sgetmask(): return the current blocked-signal mask in the
             * old single-word format as the syscall result itself.
             */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* ssetmask(mask): install a new mask and return the old one,
             * both in the old single-word format.
             */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha's osf_sigprocmask passes the mask by value in arg2
             * and returns the old mask as the syscall result.
             */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic variant: mask is passed and returned via guest
             * memory pointers (arg2 = new set, arg3 = old set out).
             */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new set: query only; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /* rt_sigprocmask(how, set, oldset, sigsetsize). */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The guest must pass exactly the target sigset size. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new set: query only; 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: write the pending set back in the
             * old single-word sigset format.
             */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the old-format mask by value in arg1. */
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            /* Other targets pass a pointer to a target_sigset_t. */
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            /* arg2 is the guest-supplied sigsetsize, validated inside. */
            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /* rt_sigtimedwait(set, info, timeout, sigsetsize). */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout: wait indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* On success the return value is the signal number. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait but with a 64-bit time timespec. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* rt_sigqueueinfo(pid, sig, info): convert the siginfo from
             * target to host layout before queueing.
             */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* Thread-directed variant: (tgid, tid, sig, info). */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block signals while the guest signal frame is being torn down. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        /* Block signals while the guest signal frame is being torn down. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            /* setrlimit(resource, rlim): convert resource id and limit
             * values from target to host representation.
             */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success without applying them. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* getrlimit(resource, rlim): convert the host limits back to
             * the target representation on success.
             */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            /* getrusage(who, usage): conversion to the target rusage
             * layout is handled by host_to_target_rusage().
             */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /* gettimeofday(tv, tz): both output pointers are optional. */
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /* settimeofday(tv, tz): both input pointers are optional. */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* Final argument selects 32-bit vs 64-bit time_t handling. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* Lock both path strings; either lock may fault. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        /* Read the target of a symlink into the guest buffer at arg2. */
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            /*
             * Either lock may fail for a bad guest address; check both
             * before use, matching the TARGET_NR_readlinkat and
             * TARGET_NR_symlink cases, instead of handing possibly-NULL
             * pointers to do_guest_readlink().
             */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_guest_readlink(p, p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only RESTART2 carries a command string in arg4. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* On these targets the six mmap arguments arrive as a block
               in guest memory pointed to by arg1, not in registers. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            return do_mmap(v1, v2, v3, v4, v5, v6);
        }
#else
        /* mmap pointers are always untagged */
        return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2's offset argument is in MMAP_SHIFT-sized units. */
        return do_mmap(arg1, arg2, arg3, arg4, arg5,
                       (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Shared conversion tail; TARGET_NR_fstatfs jumps here too. */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Shared conversion tail; note the guest buffer is arg3 here,
           not arg2 as in the statfs case above. */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Socket syscalls: thin dispatchers onto the do_* helpers. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with no address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag: 0 = receive, 1 = send. */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * NOTE(review): 'len' is initialized from arg2, which for
             * syslog(type, bufp, len) is the buffer pointer; the actual
             * length passed to sys_syslog() below is arg3.  Confirm this
             * arg2/arg3 mismatch is intentional.
             */
            int len = arg2;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions take no buffer. */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 (new value) is optional; NULL is forwarded as NULL. */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Old value is written back only if the guest asked (arg3). */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
            /* Shared tail: stat/lstat above jump here to convert 'st'
               into the guest's struct stat layout at arg2. */
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter do_syscall with arg1 as the number. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Collect rusage only if the guest supplied a buffer. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Status is only written back for a reaped child (ret != 0). */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                /* Copy each field out with byte-swapping for the guest. */
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* SysV IPC multiplexer used by some targets. */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() with no timeout. */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* Convert the guest timex into the host layout. */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                /* adjtimex updates the struct; copy it back out. */
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;

            /* arg1 is the clockid, arg2 the guest timex pointer. */
            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Convert the guest's 64-bit-time timex into the host layout. */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            /* On success, copy the updated timex back to the guest. */
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    /* Simple pass-through syscalls: no argument translation needed. */
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* 64-bit host without _llseek: combine the hi/lo halves
               and use plain lseek. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* Store the 64-bit result at the guest pointer in arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
    /* poll/ppoll share one helper; the two bools select ppoll
       semantics and 64-bit time_t handling respectively. */
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* Map the guest iovec array; last arg 0 = don't copy in. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed; errno carries the reason. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            /* Map the guest iovec array; last arg 1 = copy data in. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Split the guest's 64-bit offset into the low/high
                   register pair the host syscall expects. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed; errno carries the reason. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                /* Copy the host cpumask back into the guest buffer. */
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Read and byte-swap the guest cpumask into host layout. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Only query the values the guest actually asked for. */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            /* The kernel would fault on NULL; report EINVAL like Linux. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap the priority into the guest's endianness. */
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Clamp the guest-supplied size to the host struct size. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                /* NOTE(review): arg3 bytes are locked but all fields up to
                   sched_period are written unconditionally; presumably the
                   kernel already rejected sizes below VER0 — confirm. */
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* The util clamp fields only exist from VER1 onward. */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* The first u32 of the struct is its self-declared size. */
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            /* Sizes below VER0 are rejected; report the size we support. */
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* Any bytes beyond what we understand must be zero (kernel ABI
               rule for extensible structs). */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            /* Util clamp fields are only present from VER1 onward. */
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Copy the interval out in the guest's timespec layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* 64-bit time_t variant of the copy-out. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /* Fault if the guest request timespec cannot be read. */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            /* On interruption, report the remaining time; fault if the
               guest's "rem" buffer cannot be written. */
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
11609     case TARGET_NR_prctl:
11610         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11611         break;
#ifdef TARGET_NR_arch_prctl
    case TARGET_NR_arch_prctl:
        return do_arch_prctl(cpu_env, arg1, arg2);
#endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass the 64-bit offset in an aligned register pair,
           shifting the argument positions by one. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back exactly the number of bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        /* Copy back the path; ret is the number of bytes written. */
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* Header is VERIFY_WRITE even for capset: the kernel writes the
           version field back in both directions. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* NOTE(review): copy-back happens even when capget failed,
                   so data[] may be uninitialized here — confirm intent. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
11730 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        /* Offset pointer is optional; NULL means use the file position. */
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        /* sendfile updates the offset; write it back to the guest. */
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        /* Same as sendfile but the guest offset is always 64-bit. */
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork with vfork semantics (shared VM, parent
           suspended until child execs or exits). */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        /* Resource numbers differ between ABIs; translate first. */
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0)) {
                return -TARGET_EFAULT;
            }
            /* Convert limits (including RLIM_INFINITY) to guest format. */
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* Helper reassembles the 64-bit length from the register pair. */
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        /* Convert the host stat into the guest's stat64 layout. */
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        }
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        /* Convert the host stat into the guest's stat64 layout. */
        if (!is_error(ret)) {
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        }
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only fall through to the fstatat emulation below if the
                   host kernel lacks statx entirely. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: emulate statx with fstatat and hand-fill the
               guest statx fields from the plain stat result. */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* low2high*: widen 16-bit legacy IDs to the host's full range. */
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 is a query for the group count only. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap and widen each guest gid to host format. */
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* arg1 = dirfd, arg2 = path, arg5 = flags (passed through). */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Set real, effective and saved uids in a single call. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the three ids out, narrowed to the target id width. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * Set real, effective and saved gids in a single call,
         * widening possibly 16-bit target ids to host gid_t.
         * Fix: this case was guarded by TARGET_NR_getresgid (copy-paste
         * from the getresgid block below); guard it by its own number.
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Copy the three ids out, narrowed to the target id width. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    /* setfsuid/setfsgid take the id unmapped (full host width). */
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
12072 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit variant: ids are full width, no low2high mapping needed. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
         {
            uid_t euid;
            euid=geteuid();
            /* Effective uid goes out in register a4; real uid in v0. */
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxgid:
         {
            /*
             * Effective gid goes out in register a4; real gid in v0.
             * Fix: the variable held a gid but was declared uid_t.
             */
            gid_t egid;
            egid = getegid();
            cpu_env->ir[IR_A4] = egid;
         }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /* Merge the live fpcr status bits into the reported swcr. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Later tests win, so INV has the highest priority. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Deliver SIGFPE to the guest at its current pc. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* arg2 is the mask itself (old-style sigset in a register). */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* On success the previous mask is the syscall result. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
12260 
#ifdef TARGET_NR_getgid32
    /* 32-bit id variants: ids are full width, no low/high mapping. */
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            /* gidsetsize == 0: NULL list, host call reports group count. */
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* 32-bit ids only need byte-swapping, no widening. */
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
/* 32-bit id syscall variants: ids pass through at full width. */
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the three ids out as 32-bit values. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* arg1/arg2 describe the probed region; arg3 is the out vector. */
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                return -TARGET_ENOMEM;
            }
            /*
             * NOTE(review): the result vector is locked with
             * lock_user_string() although it is a byte-per-page output
             * buffer, and it is unlocked with length 'ret' (0 on success)
             * rather than the vector size.  Looks suspicious -- confirm
             * the vector is actually copied back on VERIFY_WRITE hosts.
             */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error code directly, not via errno. */
        return -host_to_target_errno(ret);
#endif
12436 
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the args into the common fd, offset, len, advice order. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        /* posix_fadvise returns the error code directly, not via errno. */
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390 uses its own numbering for the DONTNEED/NOREUSE advice. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
12499 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* Fully delegated to target_madvise(). */
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * 64-bit file locking (F_*LK64) needs the flock64 struct copied
         * between guest and host layouts; everything else falls through
         * to do_fcntl().  Style fix only: replaced hard-tab indentation
         * on the break statements and added the space after 'switch'
         * per QEMU coding style -- no behavior change.
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* OABI ARM needs different flock64 copy helpers. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* Copy the (possibly updated) lock description back. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the target's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* The 64-bit offset arrives in a 32-bit register pair. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        /* arg2 == 0: pass a NULL list buffer through to the host call. */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            /* Same code path; only the host function differs. */
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            /* arg3 == 0: NULL value pointer is passed through unchanged. */
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            /* p = path, n = attribute name. */
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            /* arg3 == 0: NULL value buffer is passed through unchanged. */
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            /* p = path, n = attribute name. */
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    /* Per-target TLS pointer handling; unsupported targets get ENOSYS. */
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low 8 bits of the TLS value to be clear. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          cpu_env->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
12780 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec; a copy-in fault aborts the call. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* Same as clock_settime but with a 64-bit guest timespec. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy-out failure replaces the success result with EFAULT. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /*
             * The kernel accepts a NULL res pointer, so only copy the
             * result back when one was supplied.  Fix: the copy-out
             * failure was previously ignored, silently returning
             * success for a faulting non-NULL pointer.
             */
            if (arg2 && host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /*
             * The kernel accepts a NULL res pointer, so only copy the
             * result back when one was supplied.  Fix: the copy-out
             * failure was previously ignored, silently returning
             * success for a faulting non-NULL pointer.
             */
            if (arg2 && host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the remaining-time output when arg4 is set. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        /* Same as clock_nanosleep but with 64-bit guest timespecs. */
        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12890 
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = cpu->opaque;
        /* Record the clear_child_tid pointer; QEMU handles the clearing
         * itself when the guest thread exits. */
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    /* Deliver a signal to one thread (tkill) or to a thread within a
     * specific thread group (tgkill); signal number needs translation. */
    case TARGET_NR_tkill:
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
12907 
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12925 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* args: int dirfd, const char *pathname,
             * const struct timespec times[2], int flags.
             * A NULL times pointer means "set both stamps to now". */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                /* Convert the atime/mtime pair from guest memory. */
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A NULL pathname is allowed: it operates on dirfd itself. */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* Same as utimensat but reads 64-bit guest timespecs. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* do_futex() handles all FUTEX_* ops; second arg selects whether
         * the timeout argument uses 64-bit time_t. */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        /* Create an inotify fd; register a translator so events read from
         * it are converted to the guest's struct layout. */
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* As inotify_init but with O_NONBLOCK/O_CLOEXEC-style flags that
         * must be mapped to host values. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* args: int fd, const char *pathname, uint32_t mask */
        p = lock_user_string(arg2);
        /*
         * lock_user_string() fails for a bad guest pointer; previously the
         * NULL was passed straight to path(). Fail with EFAULT here like
         * every other path-taking syscall in this file.
         */
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
13021 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* args: const char *name, int oflag, mode_t mode,
             * struct mq_attr *attr (attr may be NULL). */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the '- 1' offset on the name pointer is
             * surprising — presumably related to the mandatory leading '/'
             * in mq names; confirm against the callers/ABI before touching. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* args: const char *name (same '- 1' offset convention as mq_open). */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
13054 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /* args: mqd_t mqdes, const char *msg_ptr, size_t msg_len,
             * unsigned msg_prio, const struct timespec *abs_timeout */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            /* Previously an unchecked NULL p was passed to the host call. */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    /* Don't leak the locked guest buffer on the error path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* 64-bit time_t variant of mq_timedsend. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            /* Previously an unchecked NULL p was passed to the host call. */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    /* Don't leak the locked guest buffer on the error path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
13097 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /* args: mqd_t mqdes, char *msg_ptr, size_t msg_len,
             * unsigned *msg_prio, const struct timespec *abs_timeout */
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): locked VERIFY_READ although the host call
             * writes the message into p; the copy-back on unlock makes this
             * work, but VERIFY_WRITE looks more correct — confirm. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    /* Don't leak the locked guest buffer on the error path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            /*
             * Only write the priority back on success: on failure prio is
             * uninitialized (the old code copied garbage), and a bad
             * msg_prio pointer should report EFAULT.
             */
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* 64-bit time_t variant of mq_timedreceive. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            /* Previously an unchecked NULL p was passed to the host call. */
            if (!p) {
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    /* Don't leak the locked guest buffer on the error path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            /* Write prio back only on success; it is uninitialized on
             * failure, and a bad pointer should report EFAULT. */
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13151 
13152     /* Not implemented for now... */
13153 /*     case TARGET_NR_mq_notify: */
13154 /*         break; */
13155 
13156     case TARGET_NR_mq_getsetattr:
13157         {
13158             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13159             ret = 0;
13160             if (arg2 != 0) {
13161                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13162                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13163                                            &posix_mq_attr_out));
13164             } else if (arg3 != 0) {
13165                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13166             }
13167             if (ret == 0 && arg3 != 0) {
13168                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13169             }
13170         }
13171         return ret;
13172 #endif
13173 
13174 #ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        /* tee(2): duplicate up to arg3 bytes from pipe arg1 to pipe arg2
         * without consuming them; arg4 carries the SPLICE_F_* flags. */
        ret = get_errno(tee(arg1, arg2, arg3, arg4));
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* args: int fd_in, loff_t *off_in, int fd_out, loff_t *off_out,
             * size_t len, unsigned int flags. NULL offsets mean "use the
             * file position". */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* The host call updates the offsets; copy them back out. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
    case TARGET_NR_vmsplice:
        {
            /* args: int fd, const struct iovec *iov, unsigned long nr_segs,
             * unsigned int flags. */
            struct iovec *iov = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (iov == NULL) {
                /* lock_iovec() reports its failure through errno. */
                ret = -host_to_target_errno(errno);
            } else {
                ret = get_errno(vmsplice(arg1, iov, arg3, arg4));
                unlock_iovec(iov, arg2, arg3, 0);
            }
        }
        return ret;
#endif
13226 #endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Legacy eventfd with no flags argument; register a translator so
         * the 8-byte counter reads/writes are byte-swapped for the guest. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* eventfd2 takes EFD_* flags; only NONBLOCK and CLOEXEC need
         * translating, the rest pass through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs (except MIPS N32) the 64-bit offset and length
         * each arrive split across two registers. */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* 32-bit MIPS passes a padding register after the fd, so the
         * split 64-bit offset/nbytes start at arg3 and flags land in arg7. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* args: int fd, const sigset_t *mask, (size_t sizemask,) int flags */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy signalfd: same helper with flags forced to zero. */
        return do_signalfd4(arg1, arg2, 0);
#endif
13306 #if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is ignored by modern kernels but must be > 0. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* args: int epfd, int op, int fd, struct epoll_event *event */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
13345 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: both syscalls funnel into the host
         * epoll_pwait; epoll_wait just passes a NULL sigmask. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        /* Lock the guest's output array for the duration of the call. */
        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        /* Host-format scratch array; freed below on every path. */
        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                /* Install the temporary signal mask for the wait. */
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; convert each to the
             * guest layout and copy back only that many entries. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
13422 #endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* NOTE(review): new limits for AS/DATA/STACK appear to be
         * deliberately dropped (rnewp stays NULL) — presumably because the
         * guest's address-space limits don't map onto QEMU's own process;
         * confirm before changing. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            /* Copy the previous limits back to the guest. */
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Copy the host's hostname into the guest buffer at arg1
         * (capacity arg2). */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* args: u32 newval (arg1), u32 oldval (arg2), ..., u32 *mem (arg6).
         * Compare *mem with oldval and store newval on a match; the old
         * memory value is returned either way. */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            /*
             * Previously execution fell through and used the uninitialized
             * mem_value after the fault; return the poison value directly
             * once SIGSEGV has been queued.
             */
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
13494 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Guest timer ids are slot indices into g_posix_timers. */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* Release the reserved slot on every failure path. */
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                /* Hand the guest a tagged id; tear down on copy-out failure. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
13533 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* old_value is optional; copy it back only when requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* 64-bit time_t variant of timer_settime. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13587 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /*
             * Copy the result out only when the host call succeeded;
             * previously an uninitialized hspec could be written to the
             * guest after a failed timer_gettime().
             */
            if (!is_error(ret) && host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13610 
#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* As with timer_gettime: don't copy an uninitialized hspec
             * to the guest when the host call failed. */
            if (!is_error(ret) && host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13633 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative target errno on bad ids. */
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Return the slot to the pool for future timer_create calls. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
13666 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* args: int clockid, int flags. Register a translator so reads of
         * the expiry counter are converted for the guest. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif
13676 
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            /* args: int fd, struct itimerspec *curr_value */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* NOTE(review): its_curr is copied out even when the host call
             * failed, i.e. uninitialized data may reach the guest — confirm
             * whether this should be gated on !is_error(ret). */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit time_t variant of timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13704 
13705 #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
13706     case TARGET_NR_timerfd_settime:
13707         {
13708             struct itimerspec its_new, its_old, *p_new;
13709 
13710             if (arg3) {
13711                 if (target_to_host_itimerspec(&its_new, arg3)) {
13712                     return -TARGET_EFAULT;
13713                 }
13714                 p_new = &its_new;
13715             } else {
13716                 p_new = NULL;
13717             }
13718 
13719             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13720 
13721             if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
13722                 return -TARGET_EFAULT;
13723             }
13724         }
13725         return ret;
13726 #endif
13727 
13728 #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
13729     case TARGET_NR_timerfd_settime64:
13730         {
13731             struct itimerspec its_new, its_old, *p_new;
13732 
13733             if (arg3) {
13734                 if (target_to_host_itimerspec64(&its_new, arg3)) {
13735                     return -TARGET_EFAULT;
13736                 }
13737                 p_new = &its_new;
13738             } else {
13739                 p_new = NULL;
13740             }
13741 
13742             ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
13743 
13744             if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
13745                 return -TARGET_EFAULT;
13746             }
13747         }
13748         return ret;
13749 #endif
13750 
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    /* ioprio_get(2): arguments are plain integers, pass straight through. */
    case TARGET_NR_ioprio_get:
        return get_errno(ioprio_get(arg1, arg2));
#endif

#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    /* ioprio_set(2): arguments are plain integers, pass straight through. */
    case TARGET_NR_ioprio_set:
        return get_errno(ioprio_set(arg1, arg2, arg3));
#endif

#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    /* setns(2): fd and nstype need no translation. */
    case TARGET_NR_setns:
        return get_errno(setns(arg1, arg2));
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    /* unshare(2): CLONE_* flag bits need no translation. */
    case TARGET_NR_unshare:
        return get_errno(unshare(arg1));
#endif
#if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
    /* kcmp(2): compare kernel resources of two processes; all-integer args. */
    case TARGET_NR_kcmp:
        return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
#endif
#ifdef TARGET_NR_swapcontext
    case TARGET_NR_swapcontext:
        /* PowerPC specific.  */
        return do_swapcontext(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_memfd_create
    /* memfd_create(2): arg1 is a guest pointer to the name string. */
    case TARGET_NR_memfd_create:
        p = lock_user_string(arg1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(memfd_create(p, arg2));
        /* A fresh fd carries no translator; clear any stale registration
         * left over from a previously closed fd with the same number. */
        fd_trans_unregister(ret);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined TARGET_NR_membarrier && defined __NR_membarrier
    /* membarrier(2): command and flags are plain integers. */
    case TARGET_NR_membarrier:
        return get_errno(membarrier(arg1, arg2));
#endif

#if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
    /* copy_file_range(2): arg2/arg4 are optional guest pointers to 64-bit
     * file offsets that are read before and written back after the call. */
    case TARGET_NR_copy_file_range:
        {
            loff_t inoff, outoff;
            loff_t *pinoff = NULL, *poutoff = NULL;

            if (arg2) {
                if (get_user_u64(inoff, arg2)) {
                    return -TARGET_EFAULT;
                }
                pinoff = &inoff;
            }
            if (arg4) {
                if (get_user_u64(outoff, arg4)) {
                    return -TARGET_EFAULT;
                }
                poutoff = &outoff;
            }
            /* Do not sign-extend the count parameter. */
            ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
                                                 (abi_ulong)arg5, arg6));
            /* Only a successful copy of > 0 bytes advances the offsets, so
             * only then do the updated values go back to guest memory. */
            if (!is_error(ret) && ret > 0) {
                if (arg2) {
                    if (put_user_u64(inoff, arg2)) {
                        return -TARGET_EFAULT;
                    }
                }
                if (arg4) {
                    if (put_user_u64(outoff, arg4)) {
                        return -TARGET_EFAULT;
                    }
                }
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_pivot_root)
    /* pivot_root(2): both arguments are guest pointers to path strings. */
    case TARGET_NR_pivot_root:
        {
            void *p2;
            p = lock_user_string(arg1); /* new_root */
            p2 = lock_user_string(arg2); /* put_old */
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(pivot_root(p, p2));
            }
            /* unlock_user() tolerates a NULL pointer, so both unlocks are
             * safe even on the EFAULT path. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif

#if defined(TARGET_NR_riscv_hwprobe)
    /* RISC-V specific hardware-capability probe; handled by its own helper. */
    case TARGET_NR_riscv_hwprobe:
        return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
#endif

    default:
        /* Unknown or unimplemented syscall number for this target. */
        qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
        return -TARGET_ENOSYS;
13856     }
13857     return ret;
13858 }
13859 
13860 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13861                     abi_long arg2, abi_long arg3, abi_long arg4,
13862                     abi_long arg5, abi_long arg6, abi_long arg7,
13863                     abi_long arg8)
13864 {
13865     CPUState *cpu = env_cpu(cpu_env);
13866     abi_long ret;
13867 
13868 #ifdef DEBUG_ERESTARTSYS
13869     /* Debug-only code for exercising the syscall-restart code paths
13870      * in the per-architecture cpu main loops: restart every syscall
13871      * the guest makes once before letting it through.
13872      */
13873     {
13874         static bool flag;
13875         flag = !flag;
13876         if (flag) {
13877             return -QEMU_ERESTARTSYS;
13878         }
13879     }
13880 #endif
13881 
13882     record_syscall_start(cpu, num, arg1,
13883                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13884 
13885     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13886         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13887     }
13888 
13889     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13890                       arg5, arg6, arg7, arg8);
13891 
13892     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13893         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13894                           arg3, arg4, arg5, arg6);
13895     }
13896 
13897     record_syscall_return(cpu, num, ret);
13898     return ret;
13899 }
13900