xref: /openbmc/qemu/linux-user/syscall.c (revision cb9d5d1f)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  unsigned int,  fd, unsigned long, hi, unsigned long, lo,
321           loff_t *, res, unsigned int, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
361 /* sched_attr is not defined in glibc */
362 struct sched_attr {
363     uint32_t size;
364     uint32_t sched_policy;
365     uint64_t sched_flags;
366     int32_t sched_nice;
367     uint32_t sched_priority;
368     uint64_t sched_runtime;
369     uint64_t sched_deadline;
370     uint64_t sched_period;
371     uint32_t sched_util_min;
372     uint32_t sched_util_max;
373 };
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
/*
 * Translation table for open()/fcntl() file status flags.
 * Each row appears to be { target_mask, target_bits, host_mask, host_bits }
 * (bitmask_transtbl is declared in a project header -- confirm field names
 * there).  Rows for flags that may not exist on every host are guarded
 * with #if defined(...).
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
469 static int sys_utimensat(int dirfd, const char *pathname,
470                          const struct timespec times[2], int flags)
471 {
472     errno = ENOSYS;
473     return -1;
474 }
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
484 static int sys_renameat2(int oldfd, const char *old,
485                          int newfd, const char *new, int flags)
486 {
487     if (flags == 0) {
488         return renameat(oldfd, old, newfd, new);
489     }
490     errno = ENOSYS;
491     return -1;
492 }
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
/* Release a timer slot previously claimed by next_free_host_timer(). */
static inline void free_host_timer_slot(int id)
{
    /* store-release pairs with the atomic xchg in next_free_host_timer() */
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
543 #endif
544 
/*
 * Convert a host errno value to the guest's numbering.  The case list
 * is generated by expanding E(X) over errnos.c.inc; any errno without
 * an entry there is passed through unchanged (i.e. assumed to share
 * its value between host and target).
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
555 
/*
 * Inverse of host_to_target_errno(): map a guest errno back to the
 * host's numbering, using the same generated table, and pass through
 * any value without an entry.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
/*
 * safe_syscallN(): like _syscallN() above, but the host call goes
 * through safe_syscall() rather than libc's syscall() wrapper.
 * NOTE(review): safe_syscall (from "user/safe-syscall.h", included
 * above) appears to exist so blocking syscalls can be safely
 * interrupted and restarted around guest signals -- confirm against
 * that header's documentation.
 */
#define safe_syscall0(type, name) \
static type safe_##name(void) \
{ \
    return safe_syscall(__NR_##name); \
}

#define safe_syscall1(type, name, type1, arg1) \
static type safe_##name(type1 arg1) \
{ \
    return safe_syscall(__NR_##name, arg1); \
}

#define safe_syscall2(type, name, type1, arg1, type2, arg2) \
static type safe_##name(type1 arg1, type2 arg2) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2); \
}

#define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3); \
}

#define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
    type4, arg4, type5, arg5, type6, arg6) \
static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
    type5 arg5, type6 arg6) \
{ \
    return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764  *  use the flock64 struct rather than unsuffixed flock
765  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766  */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772 
773 static inline int host_to_target_sock_type(int host_type)
774 {
775     int target_type;
776 
777     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778     case SOCK_DGRAM:
779         target_type = TARGET_SOCK_DGRAM;
780         break;
781     case SOCK_STREAM:
782         target_type = TARGET_SOCK_STREAM;
783         break;
784     default:
785         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786         break;
787     }
788 
789 #if defined(SOCK_CLOEXEC)
790     if (host_type & SOCK_CLOEXEC) {
791         target_type |= TARGET_SOCK_CLOEXEC;
792     }
793 #endif
794 
795 #if defined(SOCK_NONBLOCK)
796     if (host_type & SOCK_NONBLOCK) {
797         target_type |= TARGET_SOCK_NONBLOCK;
798     }
799 #endif
800 
801     return target_type;
802 }
803 
804 static abi_ulong target_brk, initial_target_brk;
805 static abi_ulong brk_page;
806 
/*
 * Record the guest's initial program break.  do_brk() below refuses to
 * shrink the heap past initial_target_brk.
 */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
    /* brk_page tracks the host-page-aligned end of the mapped heap */
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
813 
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged */
    if (brk_val == target_brk) {
        return target_brk;
    }

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(new_brk), 0, new_host_brk_page - new_brk);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    if (new_host_brk_page > brk_page) {
        /* Grow the heap.  MAP_FIXED_NOREPLACE fails (rather than
         * clobbering) if something else is already mapped at brk_page. */
        new_alloc_size = new_host_brk_page - brk_page;
        mapped_addr = target_mmap(brk_page, new_alloc_size,
                                  PROT_READ | PROT_WRITE,
                                  MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                                  -1, 0);
    } else {
        /* new break still fits within the already-mapped host pages */
        new_alloc_size = 0;
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(brk_page), 0, HOST_PAGE_ALIGN(brk_page) - brk_page);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
891 
892 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
893     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
894 static inline abi_long copy_from_user_fdset(fd_set *fds,
895                                             abi_ulong target_fds_addr,
896                                             int n)
897 {
898     int i, nw, j, k;
899     abi_ulong b, *target_fds;
900 
901     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
902     if (!(target_fds = lock_user(VERIFY_READ,
903                                  target_fds_addr,
904                                  sizeof(abi_ulong) * nw,
905                                  1)))
906         return -TARGET_EFAULT;
907 
908     FD_ZERO(fds);
909     k = 0;
910     for (i = 0; i < nw; i++) {
911         /* grab the abi_ulong */
912         __get_user(b, &target_fds[i]);
913         for (j = 0; j < TARGET_ABI_BITS; j++) {
914             /* check the bit inside the abi_ulong */
915             if ((b >> j) & 1)
916                 FD_SET(k, fds);
917             k++;
918         }
919     }
920 
921     unlock_user(target_fds, target_fds_addr, 0);
922 
923     return 0;
924 }
925 
926 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
927                                                  abi_ulong target_fds_addr,
928                                                  int n)
929 {
930     if (target_fds_addr) {
931         if (copy_from_user_fdset(fds, target_fds_addr, n))
932             return -TARGET_EFAULT;
933         *fds_ptr = fds;
934     } else {
935         *fds_ptr = NULL;
936     }
937     return 0;
938 }
939 
940 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
941                                           const fd_set *fds,
942                                           int n)
943 {
944     int i, nw, j, k;
945     abi_long v;
946     abi_ulong *target_fds;
947 
948     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
949     if (!(target_fds = lock_user(VERIFY_WRITE,
950                                  target_fds_addr,
951                                  sizeof(abi_ulong) * nw,
952                                  0)))
953         return -TARGET_EFAULT;
954 
955     k = 0;
956     for (i = 0; i < nw; i++) {
957         v = 0;
958         for (j = 0; j < TARGET_ABI_BITS; j++) {
959             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
960             k++;
961         }
962         __put_user(v, &target_fds[i]);
963     }
964 
965     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
966 
967     return 0;
968 }
969 #endif
970 
/* Clock tick rate the host kernel uses for clock_t values (times(2)
 * and friends).  Alpha hosts historically tick at 1024 Hz; everything
 * else at 100 Hz. */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif
976 
/* Rescale a host clock_t tick count to the target's tick rate. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate product cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
985 
986 static inline abi_long host_to_target_rusage(abi_ulong target_addr,
987                                              const struct rusage *rusage)
988 {
989     struct target_rusage *target_rusage;
990 
991     if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
992         return -TARGET_EFAULT;
993     target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
994     target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
995     target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
996     target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
997     target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
998     target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
999     target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
1000     target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
1001     target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
1002     target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
1003     target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
1004     target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
1005     target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
1006     target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
1007     target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
1008     target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
1009     target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
1010     target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
1011     unlock_user_struct(target_rusage, target_addr, 1);
1012 
1013     return 0;
1014 }
1015 
#ifdef TARGET_NR_setrlimit
/* Convert a target rlimit value (abi_ulong) to the host rlim_t,
 * mapping TARGET_RLIM_INFINITY and any value that does not fit in the
 * host type to RLIM_INFINITY. */
static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
{
    abi_ulong swapped = tswapal(target_rlim);
    rlim_t host_rlim;

    if (swapped == TARGET_RLIM_INFINITY) {
        return RLIM_INFINITY;
    }

    host_rlim = swapped;
    if (swapped != (rlim_t)host_rlim) {
        /* Value is unrepresentable on the host: saturate to infinity. */
        return RLIM_INFINITY;
    }

    return host_rlim;
}
#endif
1033 
#if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/* Convert a host rlim_t to the target representation, saturating
 * RLIM_INFINITY and anything unrepresentable to TARGET_RLIM_INFINITY. */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_val;

    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim) {
        target_val = TARGET_RLIM_INFINITY;
    } else {
        target_val = rlim;
    }

    return tswapal(target_val);
}
#endif
1049 
/* Map a target RLIMIT_* constant to the host constant.  Unknown codes
 * are passed through unchanged so the host getrlimit/setrlimit can
 * reject them with its own error. */
static inline int target_to_host_resource(int code)
{
    switch (code) {
    case TARGET_RLIMIT_AS:
        return RLIMIT_AS;
    case TARGET_RLIMIT_CORE:
        return RLIMIT_CORE;
    case TARGET_RLIMIT_CPU:
        return RLIMIT_CPU;
    case TARGET_RLIMIT_DATA:
        return RLIMIT_DATA;
    case TARGET_RLIMIT_FSIZE:
        return RLIMIT_FSIZE;
    case TARGET_RLIMIT_LOCKS:
        return RLIMIT_LOCKS;
    case TARGET_RLIMIT_MEMLOCK:
        return RLIMIT_MEMLOCK;
    case TARGET_RLIMIT_MSGQUEUE:
        return RLIMIT_MSGQUEUE;
    case TARGET_RLIMIT_NICE:
        return RLIMIT_NICE;
    case TARGET_RLIMIT_NOFILE:
        return RLIMIT_NOFILE;
    case TARGET_RLIMIT_NPROC:
        return RLIMIT_NPROC;
    case TARGET_RLIMIT_RSS:
        return RLIMIT_RSS;
    case TARGET_RLIMIT_RTPRIO:
        return RLIMIT_RTPRIO;
#ifdef RLIMIT_RTTIME
    case TARGET_RLIMIT_RTTIME:
        return RLIMIT_RTTIME;
#endif
    case TARGET_RLIMIT_SIGPENDING:
        return RLIMIT_SIGPENDING;
    case TARGET_RLIMIT_STACK:
        return RLIMIT_STACK;
    default:
        return code;
    }
}
1091 
1092 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1093                                               abi_ulong target_tv_addr)
1094 {
1095     struct target_timeval *target_tv;
1096 
1097     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1098         return -TARGET_EFAULT;
1099     }
1100 
1101     __get_user(tv->tv_sec, &target_tv->tv_sec);
1102     __get_user(tv->tv_usec, &target_tv->tv_usec);
1103 
1104     unlock_user_struct(target_tv, target_tv_addr, 0);
1105 
1106     return 0;
1107 }
1108 
1109 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1110                                             const struct timeval *tv)
1111 {
1112     struct target_timeval *target_tv;
1113 
1114     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1115         return -TARGET_EFAULT;
1116     }
1117 
1118     __put_user(tv->tv_sec, &target_tv->tv_sec);
1119     __put_user(tv->tv_usec, &target_tv->tv_usec);
1120 
1121     unlock_user_struct(target_tv, target_tv_addr, 1);
1122 
1123     return 0;
1124 }
1125 
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/* Read a 64-bit (__kernel_sock_timeval layout) timeval from guest
 * memory.  Returns 0 on success, -TARGET_EFAULT on a bad address. */
static inline abi_long copy_from_user_timeval64(struct timeval *tv,
                                                abi_ulong target_tv_addr)
{
    struct target__kernel_sock_timeval *ttv;

    if (!lock_user_struct(VERIFY_READ, ttv, target_tv_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tv->tv_sec, &ttv->tv_sec);
    __get_user(tv->tv_usec, &ttv->tv_usec);
    unlock_user_struct(ttv, target_tv_addr, 0);
    return 0;
}
#endif
1144 
1145 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1146                                               const struct timeval *tv)
1147 {
1148     struct target__kernel_sock_timeval *target_tv;
1149 
1150     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1151         return -TARGET_EFAULT;
1152     }
1153 
1154     __put_user(tv->tv_sec, &target_tv->tv_sec);
1155     __put_user(tv->tv_usec, &target_tv->tv_usec);
1156 
1157     unlock_user_struct(target_tv, target_tv_addr, 1);
1158 
1159     return 0;
1160 }
1161 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/* Read a struct timespec (native-abi layout) from guest memory into
 * the host struct.  Returns 0 on success, -TARGET_EFAULT on a bad
 * address.
 *
 * The guard previously listed TARGET_NR_pselect6 twice; the duplicate
 * is replaced with TARGET_NR_pselect6_time64 because do_pselect6()
 * (built for either syscall, see the guard above it) calls this helper
 * in its non-time64 path. */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1185 
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/* Read a 64-bit (__kernel_timespec layout) timespec from guest memory
 * into the host struct.  Returns 0 on success, -TARGET_EFAULT on a bad
 * address. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1213 
1214 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1215                                                struct timespec *host_ts)
1216 {
1217     struct target_timespec *target_ts;
1218 
1219     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1220         return -TARGET_EFAULT;
1221     }
1222     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1223     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1224     unlock_user_struct(target_ts, target_addr, 1);
1225     return 0;
1226 }
1227 
1228 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1229                                                  struct timespec *host_ts)
1230 {
1231     struct target__kernel_timespec *target_ts;
1232 
1233     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1234         return -TARGET_EFAULT;
1235     }
1236     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1237     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1238     unlock_user_struct(target_ts, target_addr, 1);
1239     return 0;
1240 }
1241 
#if defined(TARGET_NR_gettimeofday)
/* Write a host struct timezone out to guest memory.
 * Returns 0 on success, -TARGET_EFAULT if the address is unmapped. */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /* Write-only access: copy flag 0 (matching copy_to_user_timeval
     * above) — there is no need to copy the guest data in first, as
     * both fields are overwritten below. */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
#endif
1260 
#if defined(TARGET_NR_settimeofday)
/* Read a struct timezone from guest memory into @tz.
 * Returns 0 on success, -TARGET_EFAULT if the address is unmapped. */
static inline abi_long copy_from_user_timezone(struct timezone *tz,
                                               abi_ulong target_tz_addr)
{
    struct target_timezone *ttz;

    if (!lock_user_struct(VERIFY_READ, ttz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(tz->tz_minuteswest, &ttz->tz_minuteswest);
    __get_user(tz->tz_dsttime, &ttz->tz_dsttime);
    unlock_user_struct(ttz, target_tz_addr, 0);
    return 0;
}
#endif
1279 
1280 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1281 #include <mqueue.h>
1282 
1283 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1284                                               abi_ulong target_mq_attr_addr)
1285 {
1286     struct target_mq_attr *target_mq_attr;
1287 
1288     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1289                           target_mq_attr_addr, 1))
1290         return -TARGET_EFAULT;
1291 
1292     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1293     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1294     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1295     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1296 
1297     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1298 
1299     return 0;
1300 }
1301 
1302 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1303                                             const struct mq_attr *attr)
1304 {
1305     struct target_mq_attr *target_mq_attr;
1306 
1307     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1308                           target_mq_attr_addr, 0))
1309         return -TARGET_EFAULT;
1310 
1311     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1312     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1313     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1314     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1315 
1316     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1317 
1318     return 0;
1319 }
1320 #endif
1321 
1322 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/* do_select() must return target values and target errnos.
 *
 * Implements select/_newselect: copies the three fd_sets and optional
 * timeval in from guest memory, runs the host pselect6 (converting the
 * timeout to a timespec), then copies the results and remaining time
 * back out. */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /* select takes a timeval but safe_pselect6 wants a timespec. */
    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        /* Report the remaining time back, as Linux select does. */
        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1379 
#if defined(TARGET_WANT_OLD_SYS_SELECT)
/* Old-style select(2): the guest passes a single pointer to a struct
 * holding all five arguments.  Unpack it and forward to do_select(). */
static abi_long do_old_select(abi_ulong arg1)
{
    struct target_sel_arg_struct *args;
    abi_ulong rfds, wfds, efds, timeout;
    long nfds;

    if (!lock_user_struct(VERIFY_READ, args, arg1, 1)) {
        return -TARGET_EFAULT;
    }

    nfds = tswapal(args->n);
    rfds = tswapal(args->inp);
    wfds = tswapal(args->outp);
    efds = tswapal(args->exp);
    timeout = tswapal(args->tvp);

    unlock_user_struct(args, arg1, 0);

    return do_select(nfds, rfds, wfds, efds, timeout);
}
#endif
1402 #endif
1403 
#if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6/pselect6_time64.  @time64 selects the 64-bit
 * timespec layout for arg5.  arg6, when non-zero, points at a packed
 * pair { sigset address, sigset size } in guest memory.
 * Returns target errnos.
 *
 * Fix: the "ts_ptr = &ts;" statement was misleadingly indented as if
 * it belonged to the inner else branch; it executes for both the
 * time64 and non-time64 paths and is now indented accordingly.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
#endif
1515 
#if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
    defined(TARGET_NR_ppoll_time64)
/*
 * Implement poll/ppoll/ppoll_time64.  @ppoll selects ppoll semantics
 * (timespec in arg3, sigmask in arg4/arg5); @time64 selects the 64-bit
 * timespec layout.  Returns target errnos.
 *
 * Fix: the timespec write-back failure paths previously returned
 * -TARGET_EFAULT without releasing the locked target_pfd buffer;
 * unlock_user() is now called on every exit path.
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            finish_sigsuspend_mask(ret);
        }
        /* Write the (possibly updated) remaining time back to the guest. */
        if (!is_error(ret) && arg3) {
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
#endif
1614 
/* Implement pipe/pipe2.  @is_pipe2 selects pipe2 semantics; @flags is
 * passed straight through to the host pipe2().  Returns target errnos. */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
        /* On these targets the second fd is returned in a register
         * rather than being stored through the pipedes pointer. */
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1648 
1649 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1650                                               abi_ulong target_addr,
1651                                               socklen_t len)
1652 {
1653     struct target_ip_mreqn *target_smreqn;
1654 
1655     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1656     if (!target_smreqn)
1657         return -TARGET_EFAULT;
1658     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1659     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1660     if (len == sizeof(struct target_ip_mreqn))
1661         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1662     unlock_user(target_smreqn, target_addr, 0);
1663 
1664     return 0;
1665 }
1666 
1667 static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
1668                                                abi_ulong target_addr,
1669                                                socklen_t len)
1670 {
1671     const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
1672     sa_family_t sa_family;
1673     struct target_sockaddr *target_saddr;
1674 
1675     if (fd_trans_target_to_host_addr(fd)) {
1676         return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
1677     }
1678 
1679     target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
1680     if (!target_saddr)
1681         return -TARGET_EFAULT;
1682 
1683     sa_family = tswap16(target_saddr->sa_family);
1684 
1685     /* Oops. The caller might send a incomplete sun_path; sun_path
1686      * must be terminated by \0 (see the manual page), but
1687      * unfortunately it is quite common to specify sockaddr_un
1688      * length as "strlen(x->sun_path)" while it should be
1689      * "strlen(...) + 1". We'll fix that here if needed.
1690      * Linux kernel has a similar feature.
1691      */
1692 
1693     if (sa_family == AF_UNIX) {
1694         if (len < unix_maxlen && len > 0) {
1695             char *cp = (char*)target_saddr;
1696 
1697             if ( cp[len-1] && !cp[len] )
1698                 len++;
1699         }
1700         if (len > unix_maxlen)
1701             len = unix_maxlen;
1702     }
1703 
1704     memcpy(addr, target_saddr, len);
1705     addr->sa_family = sa_family;
1706     if (sa_family == AF_NETLINK) {
1707         struct sockaddr_nl *nladdr;
1708 
1709         nladdr = (struct sockaddr_nl *)addr;
1710         nladdr->nl_pid = tswap32(nladdr->nl_pid);
1711         nladdr->nl_groups = tswap32(nladdr->nl_groups);
1712     } else if (sa_family == AF_PACKET) {
1713 	struct target_sockaddr_ll *lladdr;
1714 
1715 	lladdr = (struct target_sockaddr_ll *)addr;
1716 	lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
1717 	lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
1718     } else if (sa_family == AF_INET6) {
1719         struct sockaddr_in6 *in6addr;
1720 
1721         in6addr = (struct sockaddr_in6 *)addr;
1722         in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
1723     }
1724     unlock_user(target_saddr, target_addr, 0);
1725 
1726     return 0;
1727 }
1728 
1729 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1730                                                struct sockaddr *addr,
1731                                                socklen_t len)
1732 {
1733     struct target_sockaddr *target_saddr;
1734 
1735     if (len == 0) {
1736         return 0;
1737     }
1738     assert(addr);
1739 
1740     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1741     if (!target_saddr)
1742         return -TARGET_EFAULT;
1743     memcpy(target_saddr, addr, len);
1744     if (len >= offsetof(struct target_sockaddr, sa_family) +
1745         sizeof(target_saddr->sa_family)) {
1746         target_saddr->sa_family = tswap16(addr->sa_family);
1747     }
1748     if (addr->sa_family == AF_NETLINK &&
1749         len >= sizeof(struct target_sockaddr_nl)) {
1750         struct target_sockaddr_nl *target_nl =
1751                (struct target_sockaddr_nl *)target_saddr;
1752         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1753         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1754     } else if (addr->sa_family == AF_PACKET) {
1755         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1756         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1757         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1758     } else if (addr->sa_family == AF_INET6 &&
1759                len >= sizeof(struct target_sockaddr_in6)) {
1760         struct target_sockaddr_in6 *target_in6 =
1761                (struct target_sockaddr_in6 *)target_saddr;
1762         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1763     }
1764     unlock_user(target_saddr, target_addr, len);
1765 
1766     return 0;
1767 }
1768 
/*
 * Convert the ancillary data (control messages) of a guest msghdr into
 * host format, writing the converted cmsgs into the buffer already set
 * up in *msgh (the caller allocates msgh->msg_control, sized at twice
 * the guest buffer - see the overflow comment below).
 *
 * On return msgh->msg_controllen holds the total host CMSG space used.
 * Returns 0 on success, -TARGET_EFAULT if the guest control buffer
 * cannot be locked.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* No room for even one target header means no ancillary data. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk the guest and host cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET's value differs per target ABI; map it explicitly. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: an array of 32-bit ints. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            /* Process credentials: pid/uid/gid each converted. */
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload: copy verbatim and warn; may be wrong-endian. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1862 
/*
 * Convert host ancillary data (control messages) in *msgh into guest
 * format, writing into the guest buffer described by target_msgh.
 * Mirrors the kernel's put_cmsg(): headers are never half-copied, but
 * payloads may be truncated, in which case MSG_CTRUNC is reported to
 * the guest in msg_flags.
 *
 * On return target_msgh->msg_controllen holds the guest space consumed.
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer is bad.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* No room for even one target header means nothing to deliver. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    /* Walk host and guest cmsg chains in lockstep. */
    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length for this cmsg. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET's value differs per target ABI; map it explicitly. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* Host struct timeval and target's may differ in size. */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Not enough guest room for the full payload: truncate it. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Received file descriptors: array of 32-bit ints. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                /* Only whole, untruncated timevals are supported. */
                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                /* Peer credentials: pid/uid/gid each converted. */
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                /* TTL delivered as a 32-bit int. */
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* Extended error: sock_extended_err + offending address. */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                /* Hop limit delivered as a 32-bit int. */
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 extended error: sock_extended_err + offender addr. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy (possibly wrong-endian), zero-pad
             * any extra target space, and warn.
             */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Finalize this guest header and account for the space used;
         * clamp the last (possibly truncated) cmsg to what remains.
         */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2094 
2095 /* do_setsockopt() Must return target values and target errnos. */
2096 static abi_long do_setsockopt(int sockfd, int level, int optname,
2097                               abi_ulong optval_addr, socklen_t optlen)
2098 {
2099     abi_long ret;
2100     int val;
2101     struct ip_mreqn *ip_mreq;
2102     struct ip_mreq_source *ip_mreq_source;
2103 
2104     switch(level) {
2105     case SOL_TCP:
2106     case SOL_UDP:
2107         /* TCP and UDP options all take an 'int' value.  */
2108         if (optlen < sizeof(uint32_t))
2109             return -TARGET_EINVAL;
2110 
2111         if (get_user_u32(val, optval_addr))
2112             return -TARGET_EFAULT;
2113         ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2114         break;
2115     case SOL_IP:
2116         switch(optname) {
2117         case IP_TOS:
2118         case IP_TTL:
2119         case IP_HDRINCL:
2120         case IP_ROUTER_ALERT:
2121         case IP_RECVOPTS:
2122         case IP_RETOPTS:
2123         case IP_PKTINFO:
2124         case IP_MTU_DISCOVER:
2125         case IP_RECVERR:
2126         case IP_RECVTTL:
2127         case IP_RECVTOS:
2128 #ifdef IP_FREEBIND
2129         case IP_FREEBIND:
2130 #endif
2131         case IP_MULTICAST_TTL:
2132         case IP_MULTICAST_LOOP:
2133             val = 0;
2134             if (optlen >= sizeof(uint32_t)) {
2135                 if (get_user_u32(val, optval_addr))
2136                     return -TARGET_EFAULT;
2137             } else if (optlen >= 1) {
2138                 if (get_user_u8(val, optval_addr))
2139                     return -TARGET_EFAULT;
2140             }
2141             ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
2142             break;
2143         case IP_ADD_MEMBERSHIP:
2144         case IP_DROP_MEMBERSHIP:
2145             if (optlen < sizeof (struct target_ip_mreq) ||
2146                 optlen > sizeof (struct target_ip_mreqn))
2147                 return -TARGET_EINVAL;
2148 
2149             ip_mreq = (struct ip_mreqn *) alloca(optlen);
2150             target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
2151             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
2152             break;
2153 
2154         case IP_BLOCK_SOURCE:
2155         case IP_UNBLOCK_SOURCE:
2156         case IP_ADD_SOURCE_MEMBERSHIP:
2157         case IP_DROP_SOURCE_MEMBERSHIP:
2158             if (optlen != sizeof (struct target_ip_mreq_source))
2159                 return -TARGET_EINVAL;
2160 
2161             ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2162             if (!ip_mreq_source) {
2163                 return -TARGET_EFAULT;
2164             }
2165             ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
2166             unlock_user (ip_mreq_source, optval_addr, 0);
2167             break;
2168 
2169         default:
2170             goto unimplemented;
2171         }
2172         break;
2173     case SOL_IPV6:
2174         switch (optname) {
2175         case IPV6_MTU_DISCOVER:
2176         case IPV6_MTU:
2177         case IPV6_V6ONLY:
2178         case IPV6_RECVPKTINFO:
2179         case IPV6_UNICAST_HOPS:
2180         case IPV6_MULTICAST_HOPS:
2181         case IPV6_MULTICAST_LOOP:
2182         case IPV6_RECVERR:
2183         case IPV6_RECVHOPLIMIT:
2184         case IPV6_2292HOPLIMIT:
2185         case IPV6_CHECKSUM:
2186         case IPV6_ADDRFORM:
2187         case IPV6_2292PKTINFO:
2188         case IPV6_RECVTCLASS:
2189         case IPV6_RECVRTHDR:
2190         case IPV6_2292RTHDR:
2191         case IPV6_RECVHOPOPTS:
2192         case IPV6_2292HOPOPTS:
2193         case IPV6_RECVDSTOPTS:
2194         case IPV6_2292DSTOPTS:
2195         case IPV6_TCLASS:
2196         case IPV6_ADDR_PREFERENCES:
2197 #ifdef IPV6_RECVPATHMTU
2198         case IPV6_RECVPATHMTU:
2199 #endif
2200 #ifdef IPV6_TRANSPARENT
2201         case IPV6_TRANSPARENT:
2202 #endif
2203 #ifdef IPV6_FREEBIND
2204         case IPV6_FREEBIND:
2205 #endif
2206 #ifdef IPV6_RECVORIGDSTADDR
2207         case IPV6_RECVORIGDSTADDR:
2208 #endif
2209             val = 0;
2210             if (optlen < sizeof(uint32_t)) {
2211                 return -TARGET_EINVAL;
2212             }
2213             if (get_user_u32(val, optval_addr)) {
2214                 return -TARGET_EFAULT;
2215             }
2216             ret = get_errno(setsockopt(sockfd, level, optname,
2217                                        &val, sizeof(val)));
2218             break;
2219         case IPV6_PKTINFO:
2220         {
2221             struct in6_pktinfo pki;
2222 
2223             if (optlen < sizeof(pki)) {
2224                 return -TARGET_EINVAL;
2225             }
2226 
2227             if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
2228                 return -TARGET_EFAULT;
2229             }
2230 
2231             pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);
2232 
2233             ret = get_errno(setsockopt(sockfd, level, optname,
2234                                        &pki, sizeof(pki)));
2235             break;
2236         }
2237         case IPV6_ADD_MEMBERSHIP:
2238         case IPV6_DROP_MEMBERSHIP:
2239         {
2240             struct ipv6_mreq ipv6mreq;
2241 
2242             if (optlen < sizeof(ipv6mreq)) {
2243                 return -TARGET_EINVAL;
2244             }
2245 
2246             if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
2247                 return -TARGET_EFAULT;
2248             }
2249 
2250             ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);
2251 
2252             ret = get_errno(setsockopt(sockfd, level, optname,
2253                                        &ipv6mreq, sizeof(ipv6mreq)));
2254             break;
2255         }
2256         default:
2257             goto unimplemented;
2258         }
2259         break;
2260     case SOL_ICMPV6:
2261         switch (optname) {
2262         case ICMPV6_FILTER:
2263         {
2264             struct icmp6_filter icmp6f;
2265 
2266             if (optlen > sizeof(icmp6f)) {
2267                 optlen = sizeof(icmp6f);
2268             }
2269 
2270             if (copy_from_user(&icmp6f, optval_addr, optlen)) {
2271                 return -TARGET_EFAULT;
2272             }
2273 
2274             for (val = 0; val < 8; val++) {
2275                 icmp6f.data[val] = tswap32(icmp6f.data[val]);
2276             }
2277 
2278             ret = get_errno(setsockopt(sockfd, level, optname,
2279                                        &icmp6f, optlen));
2280             break;
2281         }
2282         default:
2283             goto unimplemented;
2284         }
2285         break;
2286     case SOL_RAW:
2287         switch (optname) {
2288         case ICMP_FILTER:
2289         case IPV6_CHECKSUM:
2290             /* those take an u32 value */
2291             if (optlen < sizeof(uint32_t)) {
2292                 return -TARGET_EINVAL;
2293             }
2294 
2295             if (get_user_u32(val, optval_addr)) {
2296                 return -TARGET_EFAULT;
2297             }
2298             ret = get_errno(setsockopt(sockfd, level, optname,
2299                                        &val, sizeof(val)));
2300             break;
2301 
2302         default:
2303             goto unimplemented;
2304         }
2305         break;
2306 #if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
2307     case SOL_ALG:
2308         switch (optname) {
2309         case ALG_SET_KEY:
2310         {
2311             char *alg_key = g_malloc(optlen);
2312 
2313             if (!alg_key) {
2314                 return -TARGET_ENOMEM;
2315             }
2316             if (copy_from_user(alg_key, optval_addr, optlen)) {
2317                 g_free(alg_key);
2318                 return -TARGET_EFAULT;
2319             }
2320             ret = get_errno(setsockopt(sockfd, level, optname,
2321                                        alg_key, optlen));
2322             g_free(alg_key);
2323             break;
2324         }
2325         case ALG_SET_AEAD_AUTHSIZE:
2326         {
2327             ret = get_errno(setsockopt(sockfd, level, optname,
2328                                        NULL, optlen));
2329             break;
2330         }
2331         default:
2332             goto unimplemented;
2333         }
2334         break;
2335 #endif
2336     case TARGET_SOL_SOCKET:
2337         switch (optname) {
2338         case TARGET_SO_RCVTIMEO:
2339         {
2340                 struct timeval tv;
2341 
2342                 optname = SO_RCVTIMEO;
2343 
2344 set_timeout:
2345                 if (optlen != sizeof(struct target_timeval)) {
2346                     return -TARGET_EINVAL;
2347                 }
2348 
2349                 if (copy_from_user_timeval(&tv, optval_addr)) {
2350                     return -TARGET_EFAULT;
2351                 }
2352 
2353                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2354                                 &tv, sizeof(tv)));
2355                 return ret;
2356         }
2357         case TARGET_SO_SNDTIMEO:
2358                 optname = SO_SNDTIMEO;
2359                 goto set_timeout;
2360         case TARGET_SO_ATTACH_FILTER:
2361         {
2362                 struct target_sock_fprog *tfprog;
2363                 struct target_sock_filter *tfilter;
2364                 struct sock_fprog fprog;
2365                 struct sock_filter *filter;
2366                 int i;
2367 
2368                 if (optlen != sizeof(*tfprog)) {
2369                     return -TARGET_EINVAL;
2370                 }
2371                 if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
2372                     return -TARGET_EFAULT;
2373                 }
2374                 if (!lock_user_struct(VERIFY_READ, tfilter,
2375                                       tswapal(tfprog->filter), 0)) {
2376                     unlock_user_struct(tfprog, optval_addr, 1);
2377                     return -TARGET_EFAULT;
2378                 }
2379 
2380                 fprog.len = tswap16(tfprog->len);
2381                 filter = g_try_new(struct sock_filter, fprog.len);
2382                 if (filter == NULL) {
2383                     unlock_user_struct(tfilter, tfprog->filter, 1);
2384                     unlock_user_struct(tfprog, optval_addr, 1);
2385                     return -TARGET_ENOMEM;
2386                 }
2387                 for (i = 0; i < fprog.len; i++) {
2388                     filter[i].code = tswap16(tfilter[i].code);
2389                     filter[i].jt = tfilter[i].jt;
2390                     filter[i].jf = tfilter[i].jf;
2391                     filter[i].k = tswap32(tfilter[i].k);
2392                 }
2393                 fprog.filter = filter;
2394 
2395                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
2396                                 SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
2397                 g_free(filter);
2398 
2399                 unlock_user_struct(tfilter, tfprog->filter, 1);
2400                 unlock_user_struct(tfprog, optval_addr, 1);
2401                 return ret;
2402         }
2403 	case TARGET_SO_BINDTODEVICE:
2404 	{
2405 		char *dev_ifname, *addr_ifname;
2406 
2407 		if (optlen > IFNAMSIZ - 1) {
2408 		    optlen = IFNAMSIZ - 1;
2409 		}
2410 		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
2411 		if (!dev_ifname) {
2412 		    return -TARGET_EFAULT;
2413 		}
2414 		optname = SO_BINDTODEVICE;
2415 		addr_ifname = alloca(IFNAMSIZ);
2416 		memcpy(addr_ifname, dev_ifname, optlen);
2417 		addr_ifname[optlen] = 0;
2418 		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
2419                                            addr_ifname, optlen));
2420 		unlock_user (dev_ifname, optval_addr, 0);
2421 		return ret;
2422 	}
2423         case TARGET_SO_LINGER:
2424         {
2425                 struct linger lg;
2426                 struct target_linger *tlg;
2427 
2428                 if (optlen != sizeof(struct target_linger)) {
2429                     return -TARGET_EINVAL;
2430                 }
2431                 if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
2432                     return -TARGET_EFAULT;
2433                 }
2434                 __get_user(lg.l_onoff, &tlg->l_onoff);
2435                 __get_user(lg.l_linger, &tlg->l_linger);
2436                 ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
2437                                 &lg, sizeof(lg)));
2438                 unlock_user_struct(tlg, optval_addr, 0);
2439                 return ret;
2440         }
2441             /* Options with 'int' argument.  */
2442         case TARGET_SO_DEBUG:
2443 		optname = SO_DEBUG;
2444 		break;
2445         case TARGET_SO_REUSEADDR:
2446 		optname = SO_REUSEADDR;
2447 		break;
2448 #ifdef SO_REUSEPORT
2449         case TARGET_SO_REUSEPORT:
2450                 optname = SO_REUSEPORT;
2451                 break;
2452 #endif
2453         case TARGET_SO_TYPE:
2454 		optname = SO_TYPE;
2455 		break;
2456         case TARGET_SO_ERROR:
2457 		optname = SO_ERROR;
2458 		break;
2459         case TARGET_SO_DONTROUTE:
2460 		optname = SO_DONTROUTE;
2461 		break;
2462         case TARGET_SO_BROADCAST:
2463 		optname = SO_BROADCAST;
2464 		break;
2465         case TARGET_SO_SNDBUF:
2466 		optname = SO_SNDBUF;
2467 		break;
2468         case TARGET_SO_SNDBUFFORCE:
2469                 optname = SO_SNDBUFFORCE;
2470                 break;
2471         case TARGET_SO_RCVBUF:
2472 		optname = SO_RCVBUF;
2473 		break;
2474         case TARGET_SO_RCVBUFFORCE:
2475                 optname = SO_RCVBUFFORCE;
2476                 break;
2477         case TARGET_SO_KEEPALIVE:
2478 		optname = SO_KEEPALIVE;
2479 		break;
2480         case TARGET_SO_OOBINLINE:
2481 		optname = SO_OOBINLINE;
2482 		break;
2483         case TARGET_SO_NO_CHECK:
2484 		optname = SO_NO_CHECK;
2485 		break;
2486         case TARGET_SO_PRIORITY:
2487 		optname = SO_PRIORITY;
2488 		break;
2489 #ifdef SO_BSDCOMPAT
2490         case TARGET_SO_BSDCOMPAT:
2491 		optname = SO_BSDCOMPAT;
2492 		break;
2493 #endif
2494         case TARGET_SO_PASSCRED:
2495 		optname = SO_PASSCRED;
2496 		break;
2497         case TARGET_SO_PASSSEC:
2498                 optname = SO_PASSSEC;
2499                 break;
2500         case TARGET_SO_TIMESTAMP:
2501 		optname = SO_TIMESTAMP;
2502 		break;
2503         case TARGET_SO_RCVLOWAT:
2504 		optname = SO_RCVLOWAT;
2505 		break;
2506         default:
2507             goto unimplemented;
2508         }
2509 	if (optlen < sizeof(uint32_t))
2510             return -TARGET_EINVAL;
2511 
2512 	if (get_user_u32(val, optval_addr))
2513             return -TARGET_EFAULT;
2514 	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
2515         break;
2516 #ifdef SOL_NETLINK
2517     case SOL_NETLINK:
2518         switch (optname) {
2519         case NETLINK_PKTINFO:
2520         case NETLINK_ADD_MEMBERSHIP:
2521         case NETLINK_DROP_MEMBERSHIP:
2522         case NETLINK_BROADCAST_ERROR:
2523         case NETLINK_NO_ENOBUFS:
2524 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2525         case NETLINK_LISTEN_ALL_NSID:
2526         case NETLINK_CAP_ACK:
2527 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2528 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2529         case NETLINK_EXT_ACK:
2530 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2531 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2532         case NETLINK_GET_STRICT_CHK:
2533 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2534             break;
2535         default:
2536             goto unimplemented;
2537         }
2538         val = 0;
2539         if (optlen < sizeof(uint32_t)) {
2540             return -TARGET_EINVAL;
2541         }
2542         if (get_user_u32(val, optval_addr)) {
2543             return -TARGET_EFAULT;
2544         }
2545         ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
2546                                    sizeof(val)));
2547         break;
2548 #endif /* SOL_NETLINK */
2549     default:
2550     unimplemented:
2551         qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
2552                       level, optname);
2553         ret = -TARGET_ENOPROTOOPT;
2554     }
2555     return ret;
2556 }
2557 
/*
 * do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2): translates the target-level/optname pair to the
 * host's, calls the host getsockopt(), then converts the returned value
 * and option length back into guest memory at optval_addr/optlen.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

/* Shared path for SO_RCVTIMEO/SO_SNDTIMEO: fetch a host timeval and
 * convert it to a target_timeval in guest memory. */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            /* Clamp the reported length to the target structure size. */
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            /* Field-by-field copy with byte swapping into the target ucred. */
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* Security label is an opaque string; copied through verbatim. */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        /* Unknown SOL_SOCKET optnames are passed through as plain ints. */
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* NOTE(review): sizeof(lv) was presumably intended to be
         * sizeof(val); both are 4 bytes on Linux hosts so the behaviour
         * looks identical — confirm before "fixing". */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        /* These two options return host-specific encodings that must be
         * mapped back to the target's values. */
        switch (optname) {
        case SO_TYPE:
            val = host_to_target_sock_type(val);
            break;
        case SO_ERROR:
            val = host_to_target_errno(val);
            break;
        }
        if (len > lv)
            len = lv;
        /* Guest may supply a buffer shorter than an int; in that case only
         * a single byte of the value is written back. */
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Small value and short guest buffer: write it as one byte. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* Same int-valued handling as the SOL_IP branch above. */
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            /* Netlink int options: length must be exactly sizeof(int). */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            /* Returns a variable-length array of u32 multicast groups. */
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* Swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): this error path returns without calling
             * unlock_user(results, ...), leaking the user lock — verify
             * and fix upstream. */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
2980 
2981 /* Convert target low/high pair representing file offset into the host
2982  * low/high pair. This function doesn't handle offsets bigger than 64 bits
2983  * as the kernel doesn't handle them either.
2984  */
2985 static void target_to_host_low_high(abi_ulong tlow,
2986                                     abi_ulong thigh,
2987                                     unsigned long *hlow,
2988                                     unsigned long *hhigh)
2989 {
2990     uint64_t off = tlow |
2991         ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
2992         TARGET_LONG_BITS / 2;
2993 
2994     *hlow = off;
2995     *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
2996 }
2997 
/*
 * Build a host iovec array from a guest iovec array at target_addr.
 *
 * 'type' is VERIFY_READ/VERIFY_WRITE for the buffer locking, 'copy'
 * controls whether buffer contents are copied in when locking.
 * Returns a g_new0-allocated array the caller must release with
 * unlock_iovec(), or NULL with errno set (errno == 0 means count was 0).
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        /* Not an error: caller distinguishes this case via errno == 0. */
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Cap the total so it never exceeds max_len overall. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release every guest buffer locked so far (nonzero lengths
     * only, mirroring the locking condition above). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3085 
3086 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3087                          abi_ulong count, int copy)
3088 {
3089     struct target_iovec *target_vec;
3090     int i;
3091 
3092     target_vec = lock_user(VERIFY_READ, target_addr,
3093                            count * sizeof(struct target_iovec), 1);
3094     if (target_vec) {
3095         for (i = 0; i < count; i++) {
3096             abi_ulong base = tswapal(target_vec[i].iov_base);
3097             abi_long len = tswapal(target_vec[i].iov_len);
3098             if (len < 0) {
3099                 break;
3100             }
3101             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3102         }
3103         unlock_user(target_vec, target_addr, 0);
3104     }
3105 
3106     g_free(vec);
3107 }
3108 
3109 static inline int target_to_host_sock_type(int *type)
3110 {
3111     int host_type = 0;
3112     int target_type = *type;
3113 
3114     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3115     case TARGET_SOCK_DGRAM:
3116         host_type = SOCK_DGRAM;
3117         break;
3118     case TARGET_SOCK_STREAM:
3119         host_type = SOCK_STREAM;
3120         break;
3121     default:
3122         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3123         break;
3124     }
3125     if (target_type & TARGET_SOCK_CLOEXEC) {
3126 #if defined(SOCK_CLOEXEC)
3127         host_type |= SOCK_CLOEXEC;
3128 #else
3129         return -TARGET_EINVAL;
3130 #endif
3131     }
3132     if (target_type & TARGET_SOCK_NONBLOCK) {
3133 #if defined(SOCK_NONBLOCK)
3134         host_type |= SOCK_NONBLOCK;
3135 #elif !defined(O_NONBLOCK)
3136         return -TARGET_EINVAL;
3137 #endif
3138     }
3139     *type = host_type;
3140     return 0;
3141 }
3142 
3143 /* Try to emulate socket type flags after socket creation.  */
3144 static int sock_flags_fixup(int fd, int target_type)
3145 {
3146 #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
3147     if (target_type & TARGET_SOCK_NONBLOCK) {
3148         int flags = fcntl(fd, F_GETFL);
3149         if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
3150             close(fd);
3151             return -TARGET_EINVAL;
3152         }
3153     }
3154 #endif
3155     return fd;
3156 }
3157 
/*
 * do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the target socket type, restricts netlink
 * protocols to the ones we can translate, creates the host socket, and
 * registers a data translator for fds whose traffic needs byte-swapping.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with fd translators (or none needed) are
     * supported; anything else would pass untranslated messages through. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* Packet sockets take a network-byte-order protocol number; swap the
     * 16-bit value between target and host representation. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* May emulate NONBLOCK via fcntl; closes the fd on failure. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: protocols were filtered above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3211 
3212 /* do_bind() Must return target values and target errnos. */
3213 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3214                         socklen_t addrlen)
3215 {
3216     void *addr;
3217     abi_long ret;
3218 
3219     if ((int)addrlen < 0) {
3220         return -TARGET_EINVAL;
3221     }
3222 
3223     addr = alloca(addrlen+1);
3224 
3225     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3226     if (ret)
3227         return ret;
3228 
3229     return get_errno(bind(sockfd, addr, addrlen));
3230 }
3231 
3232 /* do_connect() Must return target values and target errnos. */
3233 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3234                            socklen_t addrlen)
3235 {
3236     void *addr;
3237     abi_long ret;
3238 
3239     if ((int)addrlen < 0) {
3240         return -TARGET_EINVAL;
3241     }
3242 
3243     addr = alloca(addrlen+1);
3244 
3245     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3246     if (ret)
3247         return ret;
3248 
3249     return get_errno(safe_connect(sockfd, addr, addrlen));
3250 }
3251 
/*
 * do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation, operating on an already-locked
 * target_msghdr. Converts the guest msghdr (name, control data, iovec)
 * to a host one, performs the host syscall, and for receives converts
 * the results back into the guest structure.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* Double the guest's control buffer size — presumably because host
     * cmsg records can be larger than the target's; confirm against
     * target_to_host_cmsg(). NOTE(review): guest-controlled alloca size. */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* This fd's payload needs translation (e.g. netlink): run the
             * translator on a copy of the first iovec's data only. */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; 'ret' is reused for error checks
             * by the conversion helpers below. */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 is the deliberate bad pointer set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3364 
3365 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3366                                int flags, int send)
3367 {
3368     abi_long ret;
3369     struct target_msghdr *msgp;
3370 
3371     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3372                           msgp,
3373                           target_msg,
3374                           send ? 1 : 0)) {
3375         return -TARGET_EFAULT;
3376     }
3377     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3378     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3379     return ret;
3380 }
3381 
3382 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3383  * so it might not have this *mmsg-specific flag either.
3384  */
3385 #ifndef MSG_WAITFORONE
3386 #define MSG_WAITFORONE 0x10000
3387 #endif
3388 
/*
 * Emulate sendmmsg/recvmmsg: process up to vlen messages with
 * do_sendrecvmsg_locked(), recording each byte count in msg_len.
 * Returns the number of messages transferred if any succeeded,
 * otherwise the error from the first (failed) message.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* Silently clamp, matching the kernel's UIO_MAXIOV limit. */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }

    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }

    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }

    /* Copy back only the entries actually processed (i of them). */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);

    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
3428 
3429 /* do_accept4() Must return target values and target errnos. */
3430 static abi_long do_accept4(int fd, abi_ulong target_addr,
3431                            abi_ulong target_addrlen_addr, int flags)
3432 {
3433     socklen_t addrlen, ret_addrlen;
3434     void *addr;
3435     abi_long ret;
3436     int host_flags;
3437 
3438     if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
3439         return -TARGET_EINVAL;
3440     }
3441 
3442     host_flags = 0;
3443     if (flags & TARGET_SOCK_NONBLOCK) {
3444         host_flags |= SOCK_NONBLOCK;
3445     }
3446     if (flags & TARGET_SOCK_CLOEXEC) {
3447         host_flags |= SOCK_CLOEXEC;
3448     }
3449 
3450     if (target_addr == 0) {
3451         return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
3452     }
3453 
3454     /* linux returns EFAULT if addrlen pointer is invalid */
3455     if (get_user_u32(addrlen, target_addrlen_addr))
3456         return -TARGET_EFAULT;
3457 
3458     if ((int)addrlen < 0) {
3459         return -TARGET_EINVAL;
3460     }
3461 
3462     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3463         return -TARGET_EFAULT;
3464     }
3465 
3466     addr = alloca(addrlen);
3467 
3468     ret_addrlen = addrlen;
3469     ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
3470     if (!is_error(ret)) {
3471         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3472         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3473             ret = -TARGET_EFAULT;
3474         }
3475     }
3476     return ret;
3477 }
3478 
3479 /* do_getpeername() Must return target values and target errnos. */
3480 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3481                                abi_ulong target_addrlen_addr)
3482 {
3483     socklen_t addrlen, ret_addrlen;
3484     void *addr;
3485     abi_long ret;
3486 
3487     if (get_user_u32(addrlen, target_addrlen_addr))
3488         return -TARGET_EFAULT;
3489 
3490     if ((int)addrlen < 0) {
3491         return -TARGET_EINVAL;
3492     }
3493 
3494     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3495         return -TARGET_EFAULT;
3496     }
3497 
3498     addr = alloca(addrlen);
3499 
3500     ret_addrlen = addrlen;
3501     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3502     if (!is_error(ret)) {
3503         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3504         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3505             ret = -TARGET_EFAULT;
3506         }
3507     }
3508     return ret;
3509 }
3510 
3511 /* do_getsockname() Must return target values and target errnos. */
3512 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3513                                abi_ulong target_addrlen_addr)
3514 {
3515     socklen_t addrlen, ret_addrlen;
3516     void *addr;
3517     abi_long ret;
3518 
3519     if (get_user_u32(addrlen, target_addrlen_addr))
3520         return -TARGET_EFAULT;
3521 
3522     if ((int)addrlen < 0) {
3523         return -TARGET_EINVAL;
3524     }
3525 
3526     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3527         return -TARGET_EFAULT;
3528     }
3529 
3530     addr = alloca(addrlen);
3531 
3532     ret_addrlen = addrlen;
3533     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3534     if (!is_error(ret)) {
3535         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3536         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3537             ret = -TARGET_EFAULT;
3538         }
3539     }
3540     return ret;
3541 }
3542 
3543 /* do_socketpair() Must return target values and target errnos. */
3544 static abi_long do_socketpair(int domain, int type, int protocol,
3545                               abi_ulong target_tab_addr)
3546 {
3547     int tab[2];
3548     abi_long ret;
3549 
3550     target_to_host_sock_type(&type);
3551 
3552     ret = get_errno(socketpair(domain, type, protocol, tab));
3553     if (!is_error(ret)) {
3554         if (put_user_s32(tab[0], target_tab_addr)
3555             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3556             ret = -TARGET_EFAULT;
3557     }
3558     return ret;
3559 }
3560 
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL; /* saved pointer to the locked guest buffer */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    /*
     * Some fd types register a data translator (e.g. netlink payloads).
     * Rewrite a private copy so the guest's buffer is left untouched.
     */
    if (fd_trans_target_to_host_data(fd)) {
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 byte of slack for the sockaddr conversion — presumably room
         * for NUL-terminating AF_UNIX paths; TODO confirm against
         * target_to_host_sockaddr(). */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    /* If we sent from a translated copy, free it and restore the locked
     * pointer so unlock_user() releases the right buffer. */
    if (copy_msg) {
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3604 
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* A zero msg pointer is passed straight through to the host call. */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        /* Let any registered fd translator rewrite the received payload
         * for the guest (only the bytes actually received). */
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Copy back at most addrlen bytes but report the kernel's
             * full address length, as Linux does. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* Success: commit the received bytes back to guest memory. */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Failure: release the buffer without copying anything back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3665 
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * The multiplexed socketcall(2) ABI passes an operation number plus a
 * pointer to an array of abi_long arguments in guest memory; fetch the
 * right number of arguments for the operation, then dispatch to the
 * individual do_* helpers.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3758 
#define N_SHM_REGIONS	32

/* Fixed-size table tracking guest SysV shared-memory attachments. */
static struct shm_region {
    abi_ulong start;  /* guest virtual address where the segment is mapped */
    abi_ulong size;   /* length of the mapping in bytes */
    bool in_use;      /* slot currently describes a live attachment */
} shm_regions[N_SHM_REGIONS];
3766 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* padding on 32-bit ABIs */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;              /* padding on 32-bit ABIs */
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3785 
/*
 * Convert the ipc_perm embedded in a guest semid64_ds at target_addr
 * into *host_ip.  Returns 0 on success, -TARGET_EFAULT if the guest
 * struct cannot be locked.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3813 
/*
 * Copy *host_ip into the ipc_perm embedded in the guest semid64_ds at
 * target_addr.  Returns 0 on success, -TARGET_EFAULT if the guest
 * struct cannot be locked.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3841 
3842 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3843                                                abi_ulong target_addr)
3844 {
3845     struct target_semid64_ds *target_sd;
3846 
3847     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3848         return -TARGET_EFAULT;
3849     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3850         return -TARGET_EFAULT;
3851     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3852     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3853     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3854     unlock_user_struct(target_sd, target_addr, 0);
3855     return 0;
3856 }
3857 
3858 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3859                                                struct semid_ds *host_sd)
3860 {
3861     struct target_semid64_ds *target_sd;
3862 
3863     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3864         return -TARGET_EFAULT;
3865     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3866         return -TARGET_EFAULT;
3867     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3868     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3869     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3870     unlock_user_struct(target_sd, target_addr, 1);
3871     return 0;
3872 }
3873 
/* Guest-layout mirror of the host struct seminfo (IPC_INFO/SEM_INFO). */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3886 
3887 static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
3888                                               struct seminfo *host_seminfo)
3889 {
3890     struct target_seminfo *target_seminfo;
3891     if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
3892         return -TARGET_EFAULT;
3893     __put_user(host_seminfo->semmap, &target_seminfo->semmap);
3894     __put_user(host_seminfo->semmni, &target_seminfo->semmni);
3895     __put_user(host_seminfo->semmns, &target_seminfo->semmns);
3896     __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
3897     __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
3898     __put_user(host_seminfo->semopm, &target_seminfo->semopm);
3899     __put_user(host_seminfo->semume, &target_seminfo->semume);
3900     __put_user(host_seminfo->semusz, &target_seminfo->semusz);
3901     __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
3902     __put_user(host_seminfo->semaem, &target_seminfo->semaem);
3903     unlock_user_struct(target_seminfo, target_addr, 1);
3904     return 0;
3905 }
3906 
/* Fourth argument to the host semctl(); glibc requires the caller to
 * define this union themselves. */
union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
	struct seminfo *__buf;
};

/* Guest view of the same union: pointers are abi_ulong guest addresses. */
union target_semun {
	int val;
	abi_ulong buf;
	abi_ulong array;
	abi_ulong __buf;
};
3920 
3921 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3922                                                abi_ulong target_addr)
3923 {
3924     int nsems;
3925     unsigned short *array;
3926     union semun semun;
3927     struct semid_ds semid_ds;
3928     int i, ret;
3929 
3930     semun.buf = &semid_ds;
3931 
3932     ret = semctl(semid, 0, IPC_STAT, semun);
3933     if (ret == -1)
3934         return get_errno(ret);
3935 
3936     nsems = semid_ds.sem_nsems;
3937 
3938     *host_array = g_try_new(unsigned short, nsems);
3939     if (!*host_array) {
3940         return -TARGET_ENOMEM;
3941     }
3942     array = lock_user(VERIFY_READ, target_addr,
3943                       nsems*sizeof(unsigned short), 1);
3944     if (!array) {
3945         g_free(*host_array);
3946         return -TARGET_EFAULT;
3947     }
3948 
3949     for(i=0; i<nsems; i++) {
3950         __get_user((*host_array)[i], &array[i]);
3951     }
3952     unlock_user(array, target_addr, 0);
3953 
3954     return 0;
3955 }
3956 
3957 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3958                                                unsigned short **host_array)
3959 {
3960     int nsems;
3961     unsigned short *array;
3962     union semun semun;
3963     struct semid_ds semid_ds;
3964     int i, ret;
3965 
3966     semun.buf = &semid_ds;
3967 
3968     ret = semctl(semid, 0, IPC_STAT, semun);
3969     if (ret == -1)
3970         return get_errno(ret);
3971 
3972     nsems = semid_ds.sem_nsems;
3973 
3974     array = lock_user(VERIFY_WRITE, target_addr,
3975                       nsems*sizeof(unsigned short), 0);
3976     if (!array)
3977         return -TARGET_EFAULT;
3978 
3979     for(i=0; i<nsems; i++) {
3980         __put_user((*host_array)[i], &array[i]);
3981     }
3982     g_free(*host_array);
3983     unlock_user(array, target_addr, 1);
3984 
3985     return 0;
3986 }
3987 
/*
 * Emulate semctl(2): translate the guest's union semun argument to the
 * host form required by each command, perform the call, and copy any
 * results back.  Returns a target errno.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    abi_long ret = -TARGET_EINVAL;
    abi_long err;
    cmd &= 0xff;  /* strip IPC_64 and other flag bits from the command */

    switch( cmd ) {
	case GETVAL:
	case SETVAL:
            /* In 64 bit cross-endian situations, we will erroneously pick up
             * the wrong half of the union for the "val" element.  To rectify
             * this, the entire 8-byte structure is byteswapped, followed by
	     * a swap of the 4 byte val field. In other cases, the data is
	     * already in proper host byte order. */
	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
		target_su.buf = tswapal(target_su.buf);
		arg.val = tswap32(target_su.val);
	    } else {
		arg.val = target_su.val;
	    }
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            break;
	case GETALL:
	case SETALL:
            /* Both directions copy the whole value array through a
             * temporary host array. */
            err = target_to_host_semarray(semid, &array, target_su.array);
            if (err)
                return err;
            arg.array = array;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semarray(semid, target_su.array, &array);
            if (err)
                return err;
            break;
	case IPC_STAT:
	case IPC_SET:
	case SEM_STAT:
            err = target_to_host_semid_ds(&dsarg, target_su.buf);
            if (err)
                return err;
            arg.buf = &dsarg;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_semid_ds(target_su.buf, &dsarg);
            if (err)
                return err;
            break;
	case IPC_INFO:
	case SEM_INFO:
            arg.__buf = &seminfo;
            ret = get_errno(semctl(semid, semnum, cmd, arg));
            err = host_to_target_seminfo(target_su.__buf, &seminfo);
            if (err)
                return err;
            break;
	case IPC_RMID:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
            /* These commands ignore the semun argument entirely. */
            ret = get_errno(semctl(semid, semnum, cmd, NULL));
            break;
    }

    return ret;
}
4057 
/* Guest-layout mirror of struct sembuf (one semop operation). */
struct target_sembuf {
    unsigned short sem_num;
    short sem_op;
    short sem_flg;
};
4063 
4064 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4065                                              abi_ulong target_addr,
4066                                              unsigned nsops)
4067 {
4068     struct target_sembuf *target_sembuf;
4069     int i;
4070 
4071     target_sembuf = lock_user(VERIFY_READ, target_addr,
4072                               nsops*sizeof(struct target_sembuf), 1);
4073     if (!target_sembuf)
4074         return -TARGET_EFAULT;
4075 
4076     for(i=0; i<nsops; i++) {
4077         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4078         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4079         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4080     }
4081 
4082     unlock_user(target_sembuf, target_addr, 0);
4083 
4084     return 0;
4085 }
4086 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop(): convert the guest sembuf array and the
 * optional timeout (32- or 64-bit timespec, selected by time64), then
 * issue semtimedop() directly or via the multiplexed ipc() syscall.
 * Returns a target errno.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Mirror the kernel's SEMOPM limit before allocating. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Fall back to the multiplexed ipc() syscall on hosts that lack a
     * dedicated semtimedop syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4149 
/* Guest-layout mirror of the kernel's msqid64_ds (asm-generic layout). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* padding on 32-bit ABIs */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;              /* padding on 32-bit ABIs */
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;              /* padding on 32-bit ABIs */
#endif
    abi_ulong __msg_cbytes;           /* current bytes in queue */
    abi_ulong msg_qnum;               /* messages currently queued */
    abi_ulong msg_qbytes;             /* max bytes allowed in queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4173 
4174 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4175                                                abi_ulong target_addr)
4176 {
4177     struct target_msqid_ds *target_md;
4178 
4179     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4180         return -TARGET_EFAULT;
4181     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4182         return -TARGET_EFAULT;
4183     host_md->msg_stime = tswapal(target_md->msg_stime);
4184     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4185     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4186     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4187     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4188     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4189     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4190     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4191     unlock_user_struct(target_md, target_addr, 0);
4192     return 0;
4193 }
4194 
4195 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4196                                                struct msqid_ds *host_md)
4197 {
4198     struct target_msqid_ds *target_md;
4199 
4200     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4201         return -TARGET_EFAULT;
4202     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4203         return -TARGET_EFAULT;
4204     target_md->msg_stime = tswapal(host_md->msg_stime);
4205     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4206     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4207     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4208     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4209     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4210     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4211     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4212     unlock_user_struct(target_md, target_addr, 1);
4213     return 0;
4214 }
4215 
/* Guest-layout mirror of the host struct msginfo (IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4226 
4227 static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
4228                                               struct msginfo *host_msginfo)
4229 {
4230     struct target_msginfo *target_msginfo;
4231     if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
4232         return -TARGET_EFAULT;
4233     __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
4234     __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
4235     __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
4236     __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
4237     __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
4238     __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
4239     __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
4240     __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
4241     unlock_user_struct(target_msginfo, target_addr, 1);
4242     return 0;
4243 }
4244 
/*
 * Emulate msgctl(2): translate the command's argument structure between
 * guest and host layouts around the host call.  Returns a target errno.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;  /* strip IPC_64 and other flag bits from the command */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel ABI overloads the msqid_ds pointer to carry a
         * struct msginfo for these commands, hence the cast. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4276 
/* Guest-layout message header; mtext is a flexible payload in guest memory. */
struct target_msgbuf {
    abi_long mtype;
    char	mtext[1];
};
4281 
/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf (with the
 * mtype byte-swapped) and send it, directly or via the multiplexed ipc()
 * syscall.  Returns a target errno.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host msgbuf: a long mtype followed by msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    /* NOTE(review): lock_user_struct only validated sizeof(*target_mb),
     * but this reads msgsz bytes of payload beyond it — presumably safe
     * because guest memory is direct-mapped here; confirm. */
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Fallback for hosts without a dedicated msgsnd syscall. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4322 
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic ipc(): msgp and msgtyp are packed into a two-element array. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and byte-swapped mtype back to the guest.  Returns the number of
 * payload bytes received or a target errno.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Fallback for hosts without a dedicated msgrcv syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* Copy the 'ret' payload bytes back just after the guest mtype. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    /* NOTE(review): also executed when the receive failed, in which case
     * host_mb->mtype is uninitialized — looks pre-existing; confirm. */
    target_mb->mtype = tswapal(host_mb->mtype);

end:
    /* target_mb is always non-NULL here (we return early on lock failure);
     * the check is defensive. */
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4388 
4389 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4390                                                abi_ulong target_addr)
4391 {
4392     struct target_shmid_ds *target_sd;
4393 
4394     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4395         return -TARGET_EFAULT;
4396     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4397         return -TARGET_EFAULT;
4398     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4399     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4400     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4401     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4402     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4403     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4404     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4405     unlock_user_struct(target_sd, target_addr, 0);
4406     return 0;
4407 }
4408 
4409 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4410                                                struct shmid_ds *host_sd)
4411 {
4412     struct target_shmid_ds *target_sd;
4413 
4414     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4415         return -TARGET_EFAULT;
4416     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4417         return -TARGET_EFAULT;
4418     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4419     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4420     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4421     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4422     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4423     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4424     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4425     unlock_user_struct(target_sd, target_addr, 1);
4426     return 0;
4427 }
4428 
/* Guest-ABI layout of struct shminfo, written back for shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;   /* maximum segment size (bytes) */
    abi_ulong shmmin;   /* minimum segment size (bytes) */
    abi_ulong shmmni;   /* maximum number of segments */
    abi_ulong shmseg;   /* maximum segments per process */
    abi_ulong shmall;   /* maximum total shared memory (pages) */
};
4436 
4437 static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
4438                                               struct shminfo *host_shminfo)
4439 {
4440     struct target_shminfo *target_shminfo;
4441     if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
4442         return -TARGET_EFAULT;
4443     __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
4444     __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
4445     __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
4446     __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
4447     __put_user(host_shminfo->shmall, &target_shminfo->shmall);
4448     unlock_user_struct(target_shminfo, target_addr, 1);
4449     return 0;
4450 }
4451 
/* Guest-ABI layout of struct shm_info, written back for shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;              /* number of existing segments */
    abi_ulong shm_tot;         /* total allocated shm (pages) */
    abi_ulong shm_rss;         /* resident shm (pages) */
    abi_ulong shm_swp;         /* swapped shm (pages) */
    abi_ulong swap_attempts;   /* unused since Linux 2.4, kept for ABI */
    abi_ulong swap_successes;  /* unused since Linux 2.4, kept for ABI */
};
4460 
4461 static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
4462                                                struct shm_info *host_shm_info)
4463 {
4464     struct target_shm_info *target_shm_info;
4465     if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
4466         return -TARGET_EFAULT;
4467     __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
4468     __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
4469     __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
4470     __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
4471     __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
4472     __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
4473     unlock_user_struct(target_shm_info, target_addr, 1);
4474     return 0;
4475 }
4476 
/*
 * Emulate shmctl(2).  'buf' is a guest pointer to the command-specific
 * argument structure (shmid_ds, shminfo or shm_info), converted between
 * guest and host layouts as required by each command.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off the IPC_64 / version bits; host shmctl() wants the bare
     * command value. */
    cmd &= 0xff;

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        /* NOTE(review): dsarg is converted back to the guest even when
         * shmctl() failed; the guest struct is then overwritten with the
         * unmodified input — confirm this mirrors kernel behavior. */
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* The kernel fills a struct shminfo here despite the prototype's
         * shmid_ds pointer, hence the cast. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Likewise: the kernel fills a struct shm_info for SHM_INFO. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        /* These commands take no argument structure. */
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
4515 
#ifndef TARGET_FORCE_SHMLBA
/* For most architectures, SHMLBA is the same as the page size;
 * some architectures have larger values, in which case they should
 * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
 * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
 * and defining its own value for SHMLBA.
 *
 * The kernel also permits SHMLBA to be set by the architecture to a
 * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
 * this means that addresses are rounded to the large size if
 * SHM_RND is set but addresses not aligned to that size are not rejected
 * as long as they are at least page-aligned. Since the only architecture
 * which uses this is ia64 this code doesn't provide for that oddity.
 */
/* Default implementation: the guest's SHMLBA is its page size. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
#endif
4535 
/*
 * Emulate shmat(2): attach SysV shared memory segment 'shmid' at guest
 * address 'shmaddr' (or at an address of our choosing if 0).  Returns
 * the guest attach address, or a negative target errno.
 */
static abi_ulong do_shmat(CPUArchState *cpu_env, int shmid,
                          abi_ulong shmaddr, int shmflg)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong raddr;
    void *host_raddr;
    struct shmid_ds shm_info;
    int i, ret;
    abi_ulong shmlba;

    /* shmat pointers are always untagged */

    /* find out the length of the shared memory segment */
    ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
    if (is_error(ret)) {
        /* can't get length, bail out */
        return ret;
    }

    shmlba = target_shmlba(cpu_env);

    /* Enforce the target's SHMLBA alignment, rounding down if SHM_RND. */
    if (shmaddr & (shmlba - 1)) {
        if (shmflg & SHM_RND) {
            shmaddr &= ~(shmlba - 1);
        } else {
            return -TARGET_EINVAL;
        }
    }
    if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();

    /*
     * We're mapping shared memory, so ensure we generate code for parallel
     * execution and flush old translations.  This will work up to the level
     * supported by the host -- anything that requires EXCP_ATOMIC will not
     * be atomic with respect to an external process.
     */
    if (!(cpu->tcg_cflags & CF_PARALLEL)) {
        cpu->tcg_cflags |= CF_PARALLEL;
        tb_flush(cpu);
    }

    if (shmaddr)
        host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
    else {
        abi_ulong mmap_start;

        /* In order to use the host shmat, we need to honor host SHMLBA.  */
        mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_raddr = (void *)-1;
        } else
            /* NOTE(review): SHM_REMAP presumably lets shmat replace
             * whatever mapping mmap_find_vma left at this address —
             * confirm against mmap_find_vma's contract. */
            host_raddr = shmat(shmid, g2h_untagged(mmap_start),
                               shmflg | SHM_REMAP);
    }

    if (host_raddr == (void *)-1) {
        mmap_unlock();
        return get_errno((intptr_t)host_raddr);
    }
    raddr = h2g((uintptr_t)host_raddr);

    /* Record the guest-visible page permissions of the new mapping. */
    page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
                   PAGE_VALID | PAGE_RESET | PAGE_READ |
                   (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));

    /* Remember the region so do_shmdt() can clear the page flags again. */
    for (i = 0; i < N_SHM_REGIONS; i++) {
        if (!shm_regions[i].in_use) {
            shm_regions[i].in_use = true;
            shm_regions[i].start = raddr;
            shm_regions[i].size = shm_info.shm_segsz;
            break;
        }
    }

    mmap_unlock();
    return raddr;
}
4619 
4620 static inline abi_long do_shmdt(abi_ulong shmaddr)
4621 {
4622     int i;
4623     abi_long rv;
4624 
4625     /* shmdt pointers are always untagged */
4626 
4627     mmap_lock();
4628 
4629     for (i = 0; i < N_SHM_REGIONS; ++i) {
4630         if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
4631             shm_regions[i].in_use = false;
4632             page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
4633             break;
4634         }
4635     }
4636     rv = get_errno(shmdt(g2h_untagged(shmaddr)));
4637 
4638     mmap_unlock();
4639 
4640     return rv;
4641 }
4642 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the SysV ipc(2) multiplexer syscall into the individual
 * IPC operations, matching the kernel's sys_ipc(): 'call' carries the
 * operation in its low 16 bits and an ABI version in the high 16 bits;
 * the remaining arguments are interpreted per-operation.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style msgrcv: 'ptr' points at a { msgp, msgtyp }
                 * pair in guest memory (the historical "ipc kludge"). */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned through the 'third' pointer. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            /* shmat version 1 is rejected. */
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4763 
/* kernel structure types definitions */

/*
 * X-macro pass 1: expand syscall_types.h into an enum of STRUCT_* ids,
 * one per described kernel structure, terminated by STRUCT_MAX.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/*
 * X-macro pass 2: expand the same header into the struct_*_def argtype
 * tables consumed by the thunk layer; STRUCT_SPECIAL entries are
 * converted by hand elsewhere, so they get no table here.
 */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

/* Size of the fixed on-stack conversion buffer used by the ioctl thunks. */
#define MAX_STRUCT_SIZE 4096
4782 
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
                            / sizeof(struct fiemap_extent))

/*
 * Emulate ioctl(FS_IOC_FIEMAP).  Converts the guest's struct fiemap and
 * its trailing extent array to host layout, issues the ioctl, and
 * converts the header plus the kernel-filled extents back to the guest.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Reject counts whose extent array size would overflow the uint32_t
     * outbufsz computation below. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
4871 
4872 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4873                                 int fd, int cmd, abi_long arg)
4874 {
4875     const argtype *arg_type = ie->arg_type;
4876     int target_size;
4877     void *argptr;
4878     int ret;
4879     struct ifconf *host_ifconf;
4880     uint32_t outbufsz;
4881     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4882     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4883     int target_ifreq_size;
4884     int nb_ifreq;
4885     int free_buf = 0;
4886     int i;
4887     int target_ifc_len;
4888     abi_long target_ifc_buf;
4889     int host_ifc_len;
4890     char *host_ifc_buf;
4891 
4892     assert(arg_type[0] == TYPE_PTR);
4893     assert(ie->access == IOC_RW);
4894 
4895     arg_type++;
4896     target_size = thunk_type_size(arg_type, 0);
4897 
4898     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4899     if (!argptr)
4900         return -TARGET_EFAULT;
4901     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4902     unlock_user(argptr, arg, 0);
4903 
4904     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4905     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4906     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4907 
4908     if (target_ifc_buf != 0) {
4909         target_ifc_len = host_ifconf->ifc_len;
4910         nb_ifreq = target_ifc_len / target_ifreq_size;
4911         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4912 
4913         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4914         if (outbufsz > MAX_STRUCT_SIZE) {
4915             /*
4916              * We can't fit all the extents into the fixed size buffer.
4917              * Allocate one that is large enough and use it instead.
4918              */
4919             host_ifconf = g_try_malloc(outbufsz);
4920             if (!host_ifconf) {
4921                 return -TARGET_ENOMEM;
4922             }
4923             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4924             free_buf = 1;
4925         }
4926         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4927 
4928         host_ifconf->ifc_len = host_ifc_len;
4929     } else {
4930       host_ifc_buf = NULL;
4931     }
4932     host_ifconf->ifc_buf = host_ifc_buf;
4933 
4934     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4935     if (!is_error(ret)) {
4936 	/* convert host ifc_len to target ifc_len */
4937 
4938         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4939         target_ifc_len = nb_ifreq * target_ifreq_size;
4940         host_ifconf->ifc_len = target_ifc_len;
4941 
4942 	/* restore target ifc_buf */
4943 
4944         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4945 
4946 	/* copy struct ifconf to target user */
4947 
4948         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4949         if (!argptr)
4950             return -TARGET_EFAULT;
4951         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4952         unlock_user(argptr, arg, target_size);
4953 
4954         if (target_ifc_buf != 0) {
4955             /* copy ifreq[] to target user */
4956             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4957             for (i = 0; i < nb_ifreq ; i++) {
4958                 thunk_convert(argptr + i * target_ifreq_size,
4959                               host_ifc_buf + i * sizeof(struct ifreq),
4960                               ifreq_arg_type, THUNK_TARGET);
4961             }
4962             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4963         }
4964     }
4965 
4966     if (free_buf) {
4967         g_free(host_ifconf);
4968     }
4969 
4970     return ret;
4971 }
4972 
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Per-submitted-URB bookkeeping: the guest addresses of the urb struct
 * and its data buffer, the host pointer for the locked buffer, and the
 * host-format urb actually handed to the kernel.  The kernel hands back
 * a pointer to host_urb on reap, from which the enclosing live_urb is
 * recovered with offsetof() arithmetic; target_urb_adr (the first
 * field) doubles as the 64-bit hash-table key.
 */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
4983 
4984 static GHashTable *usbdevfs_urb_hashtable(void)
4985 {
4986     static GHashTable *urb_hashtable;
4987 
4988     if (!urb_hashtable) {
4989         urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
4990     }
4991     return urb_hashtable;
4992 }
4993 
/* Register a submitted URB; its leading target_urb_adr field is the key. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    g_hash_table_insert(usbdevfs_urb_hashtable(), urb, urb);
}
4999 
5000 static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
5001 {
5002     GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
5003     return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
5004 }
5005 
/* Drop a reaped or discarded URB from the in-flight table. */
static void urb_hashtable_remove(struct live_urb *urb)
{
    g_hash_table_remove(usbdevfs_urb_hashtable(), urb);
}
5011 
/*
 * Emulate USBDEVFS_REAPURB{,NDELAY}: reap a completed URB from the fd,
 * copy its results back into the guest urb struct, write the guest urb
 * address out through 'arg', and free the live_urb bookkeeping.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel writes a host pointer to the reaped urb into buf_temp. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /* Recover the enclosing live_urb from the embedded host_urb pointer. */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer that was locked at submit time. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5071 
5072 static abi_long
5073 do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
5074                              uint8_t *buf_temp __attribute__((unused)),
5075                              int fd, int cmd, abi_long arg)
5076 {
5077     struct live_urb *lurb;
5078 
5079     /* map target address back to host URB with metadata. */
5080     lurb = urb_hashtable_lookup(arg);
5081     if (!lurb) {
5082         return -TARGET_EFAULT;
5083     }
5084     return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
5085 }
5086 
/*
 * Emulate USBDEVFS_SUBMITURB: convert the guest usbdevfs_urb to host
 * format, lock the guest data buffer for the lifetime of the transfer,
 * and submit.  On success the live_urb wrapper is stashed in the hash
 * table so REAPURB/DISCARDURB can find it again.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember the guest addresses so reap can write results back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submission failed: drop the buffer lock (nothing transferred). */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5147 #endif /* CONFIG_USBFS */
5148 
5149 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5150                             int cmd, abi_long arg)
5151 {
5152     void *argptr;
5153     struct dm_ioctl *host_dm;
5154     abi_long guest_data;
5155     uint32_t guest_data_size;
5156     int target_size;
5157     const argtype *arg_type = ie->arg_type;
5158     abi_long ret;
5159     void *big_buf = NULL;
5160     char *host_data;
5161 
5162     arg_type++;
5163     target_size = thunk_type_size(arg_type, 0);
5164     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5165     if (!argptr) {
5166         ret = -TARGET_EFAULT;
5167         goto out;
5168     }
5169     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5170     unlock_user(argptr, arg, 0);
5171 
5172     /* buf_temp is too small, so fetch things into a bigger buffer */
5173     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5174     memcpy(big_buf, buf_temp, target_size);
5175     buf_temp = big_buf;
5176     host_dm = big_buf;
5177 
5178     guest_data = arg + host_dm->data_start;
5179     if ((guest_data - arg) < 0) {
5180         ret = -TARGET_EINVAL;
5181         goto out;
5182     }
5183     guest_data_size = host_dm->data_size - host_dm->data_start;
5184     host_data = (char*)host_dm + host_dm->data_start;
5185 
5186     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5187     if (!argptr) {
5188         ret = -TARGET_EFAULT;
5189         goto out;
5190     }
5191 
5192     switch (ie->host_cmd) {
5193     case DM_REMOVE_ALL:
5194     case DM_LIST_DEVICES:
5195     case DM_DEV_CREATE:
5196     case DM_DEV_REMOVE:
5197     case DM_DEV_SUSPEND:
5198     case DM_DEV_STATUS:
5199     case DM_DEV_WAIT:
5200     case DM_TABLE_STATUS:
5201     case DM_TABLE_CLEAR:
5202     case DM_TABLE_DEPS:
5203     case DM_LIST_VERSIONS:
5204         /* no input data */
5205         break;
5206     case DM_DEV_RENAME:
5207     case DM_DEV_SET_GEOMETRY:
5208         /* data contains only strings */
5209         memcpy(host_data, argptr, guest_data_size);
5210         break;
5211     case DM_TARGET_MSG:
5212         memcpy(host_data, argptr, guest_data_size);
5213         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5214         break;
5215     case DM_TABLE_LOAD:
5216     {
5217         void *gspec = argptr;
5218         void *cur_data = host_data;
5219         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5220         int spec_size = thunk_type_size(arg_type, 0);
5221         int i;
5222 
5223         for (i = 0; i < host_dm->target_count; i++) {
5224             struct dm_target_spec *spec = cur_data;
5225             uint32_t next;
5226             int slen;
5227 
5228             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5229             slen = strlen((char*)gspec + spec_size) + 1;
5230             next = spec->next;
5231             spec->next = sizeof(*spec) + slen;
5232             strcpy((char*)&spec[1], gspec + spec_size);
5233             gspec += next;
5234             cur_data += spec->next;
5235         }
5236         break;
5237     }
5238     default:
5239         ret = -TARGET_EINVAL;
5240         unlock_user(argptr, guest_data, 0);
5241         goto out;
5242     }
5243     unlock_user(argptr, guest_data, 0);
5244 
5245     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5246     if (!is_error(ret)) {
5247         guest_data = arg + host_dm->data_start;
5248         guest_data_size = host_dm->data_size - host_dm->data_start;
5249         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5250         switch (ie->host_cmd) {
5251         case DM_REMOVE_ALL:
5252         case DM_DEV_CREATE:
5253         case DM_DEV_REMOVE:
5254         case DM_DEV_RENAME:
5255         case DM_DEV_SUSPEND:
5256         case DM_DEV_STATUS:
5257         case DM_TABLE_LOAD:
5258         case DM_TABLE_CLEAR:
5259         case DM_TARGET_MSG:
5260         case DM_DEV_SET_GEOMETRY:
5261             /* no return data */
5262             break;
5263         case DM_LIST_DEVICES:
5264         {
5265             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5266             uint32_t remaining_data = guest_data_size;
5267             void *cur_data = argptr;
5268             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5269             int nl_size = 12; /* can't use thunk_size due to alignment */
5270 
5271             while (1) {
5272                 uint32_t next = nl->next;
5273                 if (next) {
5274                     nl->next = nl_size + (strlen(nl->name) + 1);
5275                 }
5276                 if (remaining_data < nl->next) {
5277                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5278                     break;
5279                 }
5280                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5281                 strcpy(cur_data + nl_size, nl->name);
5282                 cur_data += nl->next;
5283                 remaining_data -= nl->next;
5284                 if (!next) {
5285                     break;
5286                 }
5287                 nl = (void*)nl + next;
5288             }
5289             break;
5290         }
5291         case DM_DEV_WAIT:
5292         case DM_TABLE_STATUS:
5293         {
5294             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5295             void *cur_data = argptr;
5296             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5297             int spec_size = thunk_type_size(arg_type, 0);
5298             int i;
5299 
5300             for (i = 0; i < host_dm->target_count; i++) {
5301                 uint32_t next = spec->next;
5302                 int slen = strlen((char*)&spec[1]) + 1;
5303                 spec->next = (cur_data - argptr) + spec_size + slen;
5304                 if (guest_data_size < spec->next) {
5305                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5306                     break;
5307                 }
5308                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5309                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5310                 cur_data = argptr + spec->next;
5311                 spec = (void*)host_dm + host_dm->data_start + next;
5312             }
5313             break;
5314         }
5315         case DM_TABLE_DEPS:
5316         {
5317             void *hdata = (void*)host_dm + host_dm->data_start;
5318             int count = *(uint32_t*)hdata;
5319             uint64_t *hdev = hdata + 8;
5320             uint64_t *gdev = argptr + 8;
5321             int i;
5322 
5323             *(uint32_t*)argptr = tswap32(count);
5324             for (i = 0; i < count; i++) {
5325                 *gdev = tswap64(*hdev);
5326                 gdev++;
5327                 hdev++;
5328             }
5329             break;
5330         }
5331         case DM_LIST_VERSIONS:
5332         {
5333             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5334             uint32_t remaining_data = guest_data_size;
5335             void *cur_data = argptr;
5336             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5337             int vers_size = thunk_type_size(arg_type, 0);
5338 
5339             while (1) {
5340                 uint32_t next = vers->next;
5341                 if (next) {
5342                     vers->next = vers_size + (strlen(vers->name) + 1);
5343                 }
5344                 if (remaining_data < vers->next) {
5345                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5346                     break;
5347                 }
5348                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5349                 strcpy(cur_data + vers_size, vers->name);
5350                 cur_data += vers->next;
5351                 remaining_data -= vers->next;
5352                 if (!next) {
5353                     break;
5354                 }
5355                 vers = (void*)vers + next;
5356             }
5357             break;
5358         }
5359         default:
5360             unlock_user(argptr, guest_data, 0);
5361             ret = -TARGET_EINVAL;
5362             goto out;
5363         }
5364         unlock_user(argptr, guest_data, guest_data_size);
5365 
5366         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5367         if (!argptr) {
5368             ret = -TARGET_EFAULT;
5369             goto out;
5370         }
5371         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5372         unlock_user(argptr, arg, target_size);
5373     }
5374 out:
5375     g_free(big_buf);
5376     return ret;
5377 }
5378 
/*
 * BLKPG ioctl: the argument is a struct blkpg_ioctl_arg whose 'data'
 * member points to a struct blkpg_partition.  Both levels are converted
 * from the target layout, and the nested pointer is swizzled to a local
 * host copy before the host ioctl is issued.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++; /* skip TYPE_PTR so arg_type describes the struct itself */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (a guest pointer after conversion) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5431 
/*
 * SIOCADDRT/SIOCDELRT: the struct rtentry argument contains an rt_dev
 * string pointer that generic thunk conversion cannot translate, so the
 * struct is converted field by field here and the rt_dev guest string is
 * locked into host memory for the duration of the ioctl.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            /* rt_dev: replace the guest string pointer with a locked
             * host copy (or NULL if the guest passed a null pointer). */
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must have visited the rt_dev field. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5497 
5498 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5499                                      int fd, int cmd, abi_long arg)
5500 {
5501     int sig = target_to_host_signal(arg);
5502     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5503 }
5504 
5505 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                     int fd, int cmd, abi_long arg)
5507 {
5508     struct timeval tv;
5509     abi_long ret;
5510 
5511     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5512     if (is_error(ret)) {
5513         return ret;
5514     }
5515 
5516     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5517         if (copy_to_user_timeval(arg, &tv)) {
5518             return -TARGET_EFAULT;
5519         }
5520     } else {
5521         if (copy_to_user_timeval64(arg, &tv)) {
5522             return -TARGET_EFAULT;
5523         }
5524     }
5525 
5526     return ret;
5527 }
5528 
5529 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5530                                       int fd, int cmd, abi_long arg)
5531 {
5532     struct timespec ts;
5533     abi_long ret;
5534 
5535     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5536     if (is_error(ret)) {
5537         return ret;
5538     }
5539 
5540     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5541         if (host_to_target_timespec(arg, &ts)) {
5542             return -TARGET_EFAULT;
5543         }
5544     } else{
5545         if (host_to_target_timespec64(arg, &ts)) {
5546             return -TARGET_EFAULT;
5547         }
5548     }
5549 
5550     return ret;
5551 }
5552 
#ifdef TIOCGPTPEER
/* TIOCGPTPEER: convert the guest open(2) flags to host values before
 * passing them as the ioctl argument. */
static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
                                     int fd, int cmd, abi_long arg)
{
    int host_flags;

    host_flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
    return get_errno(safe_ioctl(fd, ie->host_cmd, host_flags));
}
#endif
5561 
5562 #ifdef HAVE_DRM_H
5563 
5564 static void unlock_drm_version(struct drm_version *host_ver,
5565                                struct target_drm_version *target_ver,
5566                                bool copy)
5567 {
5568     unlock_user(host_ver->name, target_ver->name,
5569                                 copy ? host_ver->name_len : 0);
5570     unlock_user(host_ver->date, target_ver->date,
5571                                 copy ? host_ver->date_len : 0);
5572     unlock_user(host_ver->desc, target_ver->desc,
5573                                 copy ? host_ver->desc_len : 0);
5574 }
5575 
5576 static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
5577                                           struct target_drm_version *target_ver)
5578 {
5579     memset(host_ver, 0, sizeof(*host_ver));
5580 
5581     __get_user(host_ver->name_len, &target_ver->name_len);
5582     if (host_ver->name_len) {
5583         host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
5584                                    target_ver->name_len, 0);
5585         if (!host_ver->name) {
5586             return -EFAULT;
5587         }
5588     }
5589 
5590     __get_user(host_ver->date_len, &target_ver->date_len);
5591     if (host_ver->date_len) {
5592         host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
5593                                    target_ver->date_len, 0);
5594         if (!host_ver->date) {
5595             goto err;
5596         }
5597     }
5598 
5599     __get_user(host_ver->desc_len, &target_ver->desc_len);
5600     if (host_ver->desc_len) {
5601         host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
5602                                    target_ver->desc_len, 0);
5603         if (!host_ver->desc) {
5604             goto err;
5605         }
5606     }
5607 
5608     return 0;
5609 err:
5610     unlock_drm_version(host_ver, target_ver, false);
5611     return -EFAULT;
5612 }
5613 
/*
 * Copy the scalar result fields of a host drm_version back into the
 * target struct, then unlock the three string buffers, copying their
 * contents back to the guest.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5626 
/*
 * DRM ioctls.  Only DRM_IOCTL_VERSION is handled here: its argument
 * struct carries three (pointer, length) string buffers that must be
 * locked into host memory around the host ioctl and copied back on
 * success.  Anything else returns -TARGET_ENOSYS.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                /* discard the locked buffers without copying back */
                unlock_drm_version(ver, target_ver, false);
            } else {
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
5654 
5655 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5656                                            struct drm_i915_getparam *gparam,
5657                                            int fd, abi_long arg)
5658 {
5659     abi_long ret;
5660     int value;
5661     struct target_drm_i915_getparam *target_gparam;
5662 
5663     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5664         return -TARGET_EFAULT;
5665     }
5666 
5667     __get_user(gparam->param, &target_gparam->param);
5668     gparam->value = &value;
5669     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5670     put_user_s32(value, target_gparam->value);
5671 
5672     unlock_user_struct(target_gparam, arg, 0);
5673     return ret;
5674 }
5675 
5676 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5677                                   int fd, int cmd, abi_long arg)
5678 {
5679     switch (ie->host_cmd) {
5680     case DRM_IOCTL_I915_GETPARAM:
5681         return do_ioctl_drm_i915_getparam(ie,
5682                                           (struct drm_i915_getparam *)buf_temp,
5683                                           fd, arg);
5684     default:
5685         return -TARGET_ENOSYS;
5686     }
5687 }
5688 
5689 #endif
5690 
/*
 * TUNSETTXFILTER: the struct tun_filter argument ends in a variable
 * length array of MAC addresses, so the fixed header and the address
 * array are read in separately, sized by the converted 'count' field.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* Reject a count whose address array would overflow buf_temp. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5726 
/*
 * Table of supported ioctls, expanded from ioctls.h:
 *   IOCTL         - generic entry converted by do_ioctl()'s thunk code
 *   IOCTL_SPECIAL - entry with a dedicated do_ioctl_*() handler (dofn)
 *   IOCTL_IGNORE  - target-defined ioctl with no host equivalent
 *                   (host_cmd left 0, rejected in do_ioctl())
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, }, /* end-of-table sentinel checked by do_ioctl() */
};
5737 
/* ??? Implement proper locking for ioctls.  */
/* do_ioctl() Must return target values and target errnos. */
/*
 * Look up the target ioctl number in ioctl_entries and dispatch:
 * either a special handler (ie->do_ioctl), a pass-through for plain
 * integer/pointer arguments, or a generic thunk conversion of the
 * pointed-to struct according to ie->access (IOC_R / IOC_W / IOC_RW).
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table ends with a target_cmd == 0 sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* scalar argument passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* result-only: run the ioctl, then convert the struct out */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* input-only: convert the struct in, then run the ioctl */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* convert in, run the ioctl, convert the result back out */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
5829 
/* termios c_iflag translation table (target <-> host), used below. */
static const bitmask_transtbl iflag_tbl[] = {
        { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
        { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
        { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
        { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
        { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
        { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
        { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
        { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
        { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
        { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
        { TARGET_IXON, TARGET_IXON, IXON, IXON },
        { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
        { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
        { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
        { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
        { 0, 0, 0, 0 }
};
5848 
5849 static const bitmask_transtbl oflag_tbl[] = {
5850 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5851 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5852 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5853 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5854 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5855 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5856 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5857 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5858 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5859 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5860 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5861 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5862 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5863 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5864 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5865 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5866 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5867 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5868 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5869 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5870 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5871 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5872 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5873 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5874 	{ 0, 0, 0, 0 }
5875 };
5876 
5877 static const bitmask_transtbl cflag_tbl[] = {
5878 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5879 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5880 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5881 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5882 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5883 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5884 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5885 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5886 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5887 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5888 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5889 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5890 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5891 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5892 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5893 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5894 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5895 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5896 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5897 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5898 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5899 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5900 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5901 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5902 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5903 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5904 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5905 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5906 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5907 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5908 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5909 	{ 0, 0, 0, 0 }
5910 };
5911 
5912 static const bitmask_transtbl lflag_tbl[] = {
5913   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5914   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5915   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5916   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5917   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5918   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5919   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5920   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5921   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5922   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5923   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5924   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5925   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5926   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5927   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5928   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5929   { 0, 0, 0, 0 }
5930 };
5931 
/*
 * Convert a target struct termios to the host layout: the four flag
 * words go through the translation tables above, and the control
 * characters are remapped index by index.
 */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Host slots with no target equivalent stay zero. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
5966 
/*
 * Convert a host struct termios to the target layout; the inverse of
 * target_to_host_termios() above.
 */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Target slots with no host equivalent stay zero. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
6001 
/* Thunk descriptor for struct termios: converted by the custom routines
 * above instead of generic field-by-field conversion. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
6008 
/* If the host does not provide these bits, they may be safely discarded. */
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

/* Translation table for mmap() flags between target and host values. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_TYPE, TARGET_MAP_SHARED, MAP_TYPE, MAP_SHARED },
    { TARGET_MAP_TYPE, TARGET_MAP_PRIVATE, MAP_TYPE, MAP_PRIVATE },
    { TARGET_MAP_TYPE, TARGET_MAP_SHARED_VALIDATE,
      MAP_TYPE, MAP_SHARED_VALIDATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_SYNC, TARGET_MAP_SYNC, MAP_SYNC, MAP_SYNC },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
    { 0, 0, 0, 0 }
};
6048 
6049 /*
6050  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6051  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6052  */
6053 #if defined(TARGET_I386)
6054 
6055 /* NOTE: there is really one LDT for all the threads */
6056 static uint8_t *ldt_table;
6057 
6058 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6059 {
6060     int size;
6061     void *p;
6062 
6063     if (!ldt_table)
6064         return 0;
6065     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6066     if (size > bytecount)
6067         size = bytecount;
6068     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6069     if (!p)
6070         return -TARGET_EFAULT;
6071     /* ??? Should this by byteswapped?  */
6072     memcpy(p, ldt_table, size);
6073     unlock_user(p, ptr, size);
6074     return size;
6075 }
6076 
6077 /* XXX: add locking support */
6078 static abi_long write_ldt(CPUX86State *env,
6079                           abi_ulong ptr, unsigned long bytecount, int oldmode)
6080 {
6081     struct target_modify_ldt_ldt_s ldt_info;
6082     struct target_modify_ldt_ldt_s *target_ldt_info;
6083     int seg_32bit, contents, read_exec_only, limit_in_pages;
6084     int seg_not_present, useable, lm;
6085     uint32_t *lp, entry_1, entry_2;
6086 
6087     if (bytecount != sizeof(ldt_info))
6088         return -TARGET_EINVAL;
6089     if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
6090         return -TARGET_EFAULT;
6091     ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
6092     ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
6093     ldt_info.limit = tswap32(target_ldt_info->limit);
6094     ldt_info.flags = tswap32(target_ldt_info->flags);
6095     unlock_user_struct(target_ldt_info, ptr, 0);
6096 
6097     if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
6098         return -TARGET_EINVAL;
6099     seg_32bit = ldt_info.flags & 1;
6100     contents = (ldt_info.flags >> 1) & 3;
6101     read_exec_only = (ldt_info.flags >> 3) & 1;
6102     limit_in_pages = (ldt_info.flags >> 4) & 1;
6103     seg_not_present = (ldt_info.flags >> 5) & 1;
6104     useable = (ldt_info.flags >> 6) & 1;
6105 #ifdef TARGET_ABI32
6106     lm = 0;
6107 #else
6108     lm = (ldt_info.flags >> 7) & 1;
6109 #endif
6110     if (contents == 3) {
6111         if (oldmode)
6112             return -TARGET_EINVAL;
6113         if (seg_not_present == 0)
6114             return -TARGET_EINVAL;
6115     }
6116     /* allocate the LDT */
6117     if (!ldt_table) {
6118         env->ldt.base = target_mmap(0,
6119                                     TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
6120                                     PROT_READ|PROT_WRITE,
6121                                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
6122         if (env->ldt.base == -1)
6123             return -TARGET_ENOMEM;
6124         memset(g2h_untagged(env->ldt.base), 0,
6125                TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
6126         env->ldt.limit = 0xffff;
6127         ldt_table = g2h_untagged(env->ldt.base);
6128     }
6129 
6130     /* NOTE: same code as Linux kernel */
6131     /* Allow LDTs to be cleared by the user. */
6132     if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
6133         if (oldmode ||
6134             (contents == 0		&&
6135              read_exec_only == 1	&&
6136              seg_32bit == 0		&&
6137              limit_in_pages == 0	&&
6138              seg_not_present == 1	&&
6139              useable == 0 )) {
6140             entry_1 = 0;
6141             entry_2 = 0;
6142             goto install;
6143         }
6144     }
6145 
6146     entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
6147         (ldt_info.limit & 0x0ffff);
6148     entry_2 = (ldt_info.base_addr & 0xff000000) |
6149         ((ldt_info.base_addr & 0x00ff0000) >> 16) |
6150         (ldt_info.limit & 0xf0000) |
6151         ((read_exec_only ^ 1) << 9) |
6152         (contents << 10) |
6153         ((seg_not_present ^ 1) << 15) |
6154         (seg_32bit << 22) |
6155         (limit_in_pages << 23) |
6156         (lm << 21) |
6157         0x7000;
6158     if (!oldmode)
6159         entry_2 |= (useable << 20);
6160 
6161     /* Install the new entry ...  */
6162 install:
6163     lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
6164     lp[0] = tswap32(entry_1);
6165     lp[1] = tswap32(entry_2);
6166     return 0;
6167 }
6168 
6169 /* specific and weird i386 syscalls */
6170 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6171                               unsigned long bytecount)
6172 {
6173     abi_long ret;
6174 
6175     switch (func) {
6176     case 0:
6177         ret = read_ldt(ptr, bytecount);
6178         break;
6179     case 1:
6180         ret = write_ldt(env, ptr, bytecount, 1);
6181         break;
6182     case 0x11:
6183         ret = write_ldt(env, ptr, bytecount, 0);
6184         break;
6185     default:
6186         ret = -TARGET_ENOSYS;
6187         break;
6188     }
6189     return ret;
6190 }
6191 
6192 #if defined(TARGET_ABI32)
/*
 * Implement set_thread_area(2): install a TLS descriptor into one of the
 * GDT's TLS slots.  If the guest passes entry_number == -1, the first free
 * slot is selected and written back to the guest struct, as the kernel does.
 * Returns 0 on success or a target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* -1 means "pick a free TLS slot for me" */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word; the layout mirrors the kernel's user_desc. */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack the fields into the two 32-bit halves of a hardware
       segment descriptor (same layout as the Linux kernel builds). */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6277 
/*
 * Implement get_thread_area(2): read the TLS descriptor selected by the
 * guest-supplied entry_number back out of the GDT, unpacking the hardware
 * descriptor format into the user_desc-style fields the guest expects.
 * Returns 0 on success or a target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Inverse of the packing done in do_set_thread_area(). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6324 
/* arch_prctl is not available to 32-bit (TARGET_ABI32) guests. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6329 #else
6330 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6331 {
6332     abi_long ret = 0;
6333     abi_ulong val;
6334     int idx;
6335 
6336     switch(code) {
6337     case TARGET_ARCH_SET_GS:
6338     case TARGET_ARCH_SET_FS:
6339         if (code == TARGET_ARCH_SET_GS)
6340             idx = R_GS;
6341         else
6342             idx = R_FS;
6343         cpu_x86_load_seg(env, idx, 0);
6344         env->segs[idx].base = addr;
6345         break;
6346     case TARGET_ARCH_GET_GS:
6347     case TARGET_ARCH_GET_FS:
6348         if (code == TARGET_ARCH_GET_GS)
6349             idx = R_GS;
6350         else
6351             idx = R_FS;
6352         val = env->segs[idx].base;
6353         if (put_user(val, addr, abi_ulong))
6354             ret = -TARGET_EFAULT;
6355         break;
6356     default:
6357         ret = -TARGET_EINVAL;
6358         break;
6359     }
6360     return ret;
6361 }
6362 #endif /* defined(TARGET_ABI32 */
6363 #endif /* defined(TARGET_I386) */
6364 
6365 /*
6366  * These constants are generic.  Supply any that are missing from the host.
6367  */
6368 #ifndef PR_SET_NAME
6369 # define PR_SET_NAME    15
6370 # define PR_GET_NAME    16
6371 #endif
6372 #ifndef PR_SET_FP_MODE
6373 # define PR_SET_FP_MODE 45
6374 # define PR_GET_FP_MODE 46
6375 # define PR_FP_MODE_FR   (1 << 0)
6376 # define PR_FP_MODE_FRE  (1 << 1)
6377 #endif
6378 #ifndef PR_SVE_SET_VL
6379 # define PR_SVE_SET_VL  50
6380 # define PR_SVE_GET_VL  51
6381 # define PR_SVE_VL_LEN_MASK  0xffff
6382 # define PR_SVE_VL_INHERIT   (1 << 17)
6383 #endif
6384 #ifndef PR_PAC_RESET_KEYS
6385 # define PR_PAC_RESET_KEYS  54
6386 # define PR_PAC_APIAKEY   (1 << 0)
6387 # define PR_PAC_APIBKEY   (1 << 1)
6388 # define PR_PAC_APDAKEY   (1 << 2)
6389 # define PR_PAC_APDBKEY   (1 << 3)
6390 # define PR_PAC_APGAKEY   (1 << 4)
6391 #endif
6392 #ifndef PR_SET_TAGGED_ADDR_CTRL
6393 # define PR_SET_TAGGED_ADDR_CTRL 55
6394 # define PR_GET_TAGGED_ADDR_CTRL 56
6395 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6396 #endif
6397 #ifndef PR_MTE_TCF_SHIFT
6398 # define PR_MTE_TCF_SHIFT       1
6399 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6400 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6401 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6402 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6403 # define PR_MTE_TAG_SHIFT       3
6404 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6405 #endif
6406 #ifndef PR_SET_IO_FLUSHER
6407 # define PR_SET_IO_FLUSHER 57
6408 # define PR_GET_IO_FLUSHER 58
6409 #endif
6410 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6411 # define PR_SET_SYSCALL_USER_DISPATCH 59
6412 #endif
6413 #ifndef PR_SME_SET_VL
6414 # define PR_SME_SET_VL  63
6415 # define PR_SME_GET_VL  64
6416 # define PR_SME_VL_LEN_MASK  0xffff
6417 # define PR_SME_VL_INHERIT   (1 << 17)
6418 #endif
6419 
6420 #include "target_prctl.h"
6421 
/* Default handler for zero-argument prctl options a target does not support. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6426 
/* Default handler for one-argument prctl options a target does not support. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6431 
6432 #ifndef do_prctl_get_fp_mode
6433 #define do_prctl_get_fp_mode do_prctl_inval0
6434 #endif
6435 #ifndef do_prctl_set_fp_mode
6436 #define do_prctl_set_fp_mode do_prctl_inval1
6437 #endif
6438 #ifndef do_prctl_sve_get_vl
6439 #define do_prctl_sve_get_vl do_prctl_inval0
6440 #endif
6441 #ifndef do_prctl_sve_set_vl
6442 #define do_prctl_sve_set_vl do_prctl_inval1
6443 #endif
6444 #ifndef do_prctl_reset_keys
6445 #define do_prctl_reset_keys do_prctl_inval1
6446 #endif
6447 #ifndef do_prctl_set_tagged_addr_ctrl
6448 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6449 #endif
6450 #ifndef do_prctl_get_tagged_addr_ctrl
6451 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6452 #endif
6453 #ifndef do_prctl_get_unalign
6454 #define do_prctl_get_unalign do_prctl_inval1
6455 #endif
6456 #ifndef do_prctl_set_unalign
6457 #define do_prctl_set_unalign do_prctl_inval1
6458 #endif
6459 #ifndef do_prctl_sme_get_vl
6460 #define do_prctl_sme_get_vl do_prctl_inval0
6461 #endif
6462 #ifndef do_prctl_sme_set_vl
6463 #define do_prctl_sme_set_vl do_prctl_inval1
6464 #endif
6465 
/*
 * Implement prctl(2).  Options fall into four groups: those needing
 * argument translation (signals, user pointers), those delegated to
 * per-target handlers (FP mode, SVE/SME vector length, pointer auth,
 * tagged addresses, alignment), those safe to pass straight through to
 * the host, and those deliberately rejected because honouring them would
 * break the emulation.  Returns the host result or a target errno.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* Translate the host signal number back to the target's. */
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The task comm name buffer is 16 bytes, NUL included. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6597 
6598 #define NEW_STACK_SIZE 0x40000
6599 
6600 
/* Serializes thread creation so setup of a new CPU appears atomic. */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to the newly created thread. */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;          /* protects the startup handshake */
    pthread_cond_t cond;            /* signalled when the child is ready */
    pthread_t thread;
    uint32_t tid;                   /* host tid, filled in by the child */
    abi_ulong child_tidptr;         /* CLONE_CHILD_SETTID target, or 0 */
    abi_ulong parent_tidptr;        /* CLONE_PARENT_SETTID target, or 0 */
    sigset_t sigmask;               /* signal mask to restore in the child */
} new_thread_info;
6612 
/*
 * Entry point of a thread created via clone(CLONE_VM): register with RCU
 * and TCG, publish the tid, signal readiness to the parent, then wait for
 * the parent to finish TLS setup before entering the CPU loop.
 * The statement order here is a handshake protocol with do_fork().
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6646 
6647 /* do_fork() Must return host values and target errnos (unlike most
6648    do_*() functions). */
6649 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6650                    abi_ulong parent_tidptr, target_ulong newtls,
6651                    abi_ulong child_tidptr)
6652 {
6653     CPUState *cpu = env_cpu(env);
6654     int ret;
6655     TaskState *ts;
6656     CPUState *new_cpu;
6657     CPUArchState *new_env;
6658     sigset_t sigmask;
6659 
6660     flags &= ~CLONE_IGNORED_FLAGS;
6661 
6662     /* Emulate vfork() with fork() */
6663     if (flags & CLONE_VFORK)
6664         flags &= ~(CLONE_VFORK | CLONE_VM);
6665 
6666     if (flags & CLONE_VM) {
6667         TaskState *parent_ts = (TaskState *)cpu->opaque;
6668         new_thread_info info;
6669         pthread_attr_t attr;
6670 
6671         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6672             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6673             return -TARGET_EINVAL;
6674         }
6675 
6676         ts = g_new0(TaskState, 1);
6677         init_task_state(ts);
6678 
6679         /* Grab a mutex so that thread setup appears atomic.  */
6680         pthread_mutex_lock(&clone_lock);
6681 
6682         /*
6683          * If this is our first additional thread, we need to ensure we
6684          * generate code for parallel execution and flush old translations.
6685          * Do this now so that the copy gets CF_PARALLEL too.
6686          */
6687         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6688             cpu->tcg_cflags |= CF_PARALLEL;
6689             tb_flush(cpu);
6690         }
6691 
6692         /* we create a new CPU instance. */
6693         new_env = cpu_copy(env);
6694         /* Init regs that differ from the parent.  */
6695         cpu_clone_regs_child(new_env, newsp, flags);
6696         cpu_clone_regs_parent(env, flags);
6697         new_cpu = env_cpu(new_env);
6698         new_cpu->opaque = ts;
6699         ts->bprm = parent_ts->bprm;
6700         ts->info = parent_ts->info;
6701         ts->signal_mask = parent_ts->signal_mask;
6702 
6703         if (flags & CLONE_CHILD_CLEARTID) {
6704             ts->child_tidptr = child_tidptr;
6705         }
6706 
6707         if (flags & CLONE_SETTLS) {
6708             cpu_set_tls (new_env, newtls);
6709         }
6710 
6711         memset(&info, 0, sizeof(info));
6712         pthread_mutex_init(&info.mutex, NULL);
6713         pthread_mutex_lock(&info.mutex);
6714         pthread_cond_init(&info.cond, NULL);
6715         info.env = new_env;
6716         if (flags & CLONE_CHILD_SETTID) {
6717             info.child_tidptr = child_tidptr;
6718         }
6719         if (flags & CLONE_PARENT_SETTID) {
6720             info.parent_tidptr = parent_tidptr;
6721         }
6722 
6723         ret = pthread_attr_init(&attr);
6724         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6725         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6726         /* It is not safe to deliver signals until the child has finished
6727            initializing, so temporarily block all signals.  */
6728         sigfillset(&sigmask);
6729         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6730         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6731 
6732         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6733         /* TODO: Free new CPU state if thread creation failed.  */
6734 
6735         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6736         pthread_attr_destroy(&attr);
6737         if (ret == 0) {
6738             /* Wait for the child to initialize.  */
6739             pthread_cond_wait(&info.cond, &info.mutex);
6740             ret = info.tid;
6741         } else {
6742             ret = -1;
6743         }
6744         pthread_mutex_unlock(&info.mutex);
6745         pthread_cond_destroy(&info.cond);
6746         pthread_mutex_destroy(&info.mutex);
6747         pthread_mutex_unlock(&clone_lock);
6748     } else {
6749         /* if no CLONE_VM, we consider it is a fork */
6750         if (flags & CLONE_INVALID_FORK_FLAGS) {
6751             return -TARGET_EINVAL;
6752         }
6753 
6754         /* We can't support custom termination signals */
6755         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6756             return -TARGET_EINVAL;
6757         }
6758 
6759 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6760         if (flags & CLONE_PIDFD) {
6761             return -TARGET_EINVAL;
6762         }
6763 #endif
6764 
6765         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6766         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6767             return -TARGET_EINVAL;
6768         }
6769 
6770         if (block_signals()) {
6771             return -QEMU_ERESTARTSYS;
6772         }
6773 
6774         fork_start();
6775         ret = fork();
6776         if (ret == 0) {
6777             /* Child Process.  */
6778             cpu_clone_regs_child(env, newsp, flags);
6779             fork_end(1);
6780             /* There is a race condition here.  The parent process could
6781                theoretically read the TID in the child process before the child
6782                tid is set.  This would require using either ptrace
6783                (not implemented) or having *_tidptr to point at a shared memory
6784                mapping.  We can't repeat the spinlock hack used above because
6785                the child process gets its own copy of the lock.  */
6786             if (flags & CLONE_CHILD_SETTID)
6787                 put_user_u32(sys_gettid(), child_tidptr);
6788             if (flags & CLONE_PARENT_SETTID)
6789                 put_user_u32(sys_gettid(), parent_tidptr);
6790             ts = (TaskState *)cpu->opaque;
6791             if (flags & CLONE_SETTLS)
6792                 cpu_set_tls (env, newtls);
6793             if (flags & CLONE_CHILD_CLEARTID)
6794                 ts->child_tidptr = child_tidptr;
6795         } else {
6796             cpu_clone_regs_parent(env, flags);
6797             if (flags & CLONE_PIDFD) {
6798                 int pid_fd = 0;
6799 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6800                 int pid_child = ret;
6801                 pid_fd = pidfd_open(pid_child, 0);
6802                 if (pid_fd >= 0) {
6803                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6804                                                | FD_CLOEXEC);
6805                 } else {
6806                         pid_fd = 0;
6807                 }
6808 #endif
6809                 put_user_u32(pid_fd, parent_tidptr);
6810                 }
6811             fork_end(0);
6812         }
6813         g_assert(!cpu_in_exclusive_context(cpu));
6814     }
6815     return ret;
6816 }
6817 
6818 /* warning : doesn't handle linux specific flags... */
6819 static int target_to_host_fcntl_cmd(int cmd)
6820 {
6821     int ret;
6822 
6823     switch(cmd) {
6824     case TARGET_F_DUPFD:
6825     case TARGET_F_GETFD:
6826     case TARGET_F_SETFD:
6827     case TARGET_F_GETFL:
6828     case TARGET_F_SETFL:
6829     case TARGET_F_OFD_GETLK:
6830     case TARGET_F_OFD_SETLK:
6831     case TARGET_F_OFD_SETLKW:
6832         ret = cmd;
6833         break;
6834     case TARGET_F_GETLK:
6835         ret = F_GETLK64;
6836         break;
6837     case TARGET_F_SETLK:
6838         ret = F_SETLK64;
6839         break;
6840     case TARGET_F_SETLKW:
6841         ret = F_SETLKW64;
6842         break;
6843     case TARGET_F_GETOWN:
6844         ret = F_GETOWN;
6845         break;
6846     case TARGET_F_SETOWN:
6847         ret = F_SETOWN;
6848         break;
6849     case TARGET_F_GETSIG:
6850         ret = F_GETSIG;
6851         break;
6852     case TARGET_F_SETSIG:
6853         ret = F_SETSIG;
6854         break;
6855 #if TARGET_ABI_BITS == 32
6856     case TARGET_F_GETLK64:
6857         ret = F_GETLK64;
6858         break;
6859     case TARGET_F_SETLK64:
6860         ret = F_SETLK64;
6861         break;
6862     case TARGET_F_SETLKW64:
6863         ret = F_SETLKW64;
6864         break;
6865 #endif
6866     case TARGET_F_SETLEASE:
6867         ret = F_SETLEASE;
6868         break;
6869     case TARGET_F_GETLEASE:
6870         ret = F_GETLEASE;
6871         break;
6872 #ifdef F_DUPFD_CLOEXEC
6873     case TARGET_F_DUPFD_CLOEXEC:
6874         ret = F_DUPFD_CLOEXEC;
6875         break;
6876 #endif
6877     case TARGET_F_NOTIFY:
6878         ret = F_NOTIFY;
6879         break;
6880 #ifdef F_GETOWN_EX
6881     case TARGET_F_GETOWN_EX:
6882         ret = F_GETOWN_EX;
6883         break;
6884 #endif
6885 #ifdef F_SETOWN_EX
6886     case TARGET_F_SETOWN_EX:
6887         ret = F_SETOWN_EX;
6888         break;
6889 #endif
6890 #ifdef F_SETPIPE_SZ
6891     case TARGET_F_SETPIPE_SZ:
6892         ret = F_SETPIPE_SZ;
6893         break;
6894     case TARGET_F_GETPIPE_SZ:
6895         ret = F_GETPIPE_SZ;
6896         break;
6897 #endif
6898 #ifdef F_ADD_SEALS
6899     case TARGET_F_ADD_SEALS:
6900         ret = F_ADD_SEALS;
6901         break;
6902     case TARGET_F_GET_SEALS:
6903         ret = F_GET_SEALS;
6904         break;
6905 #endif
6906     default:
6907         ret = -TARGET_EINVAL;
6908         break;
6909     }
6910 
6911 #if defined(__powerpc64__)
6912     /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
6913      * is not supported by kernel. The glibc fcntl call actually adjusts
6914      * them to 5, 6 and 7 before making the syscall(). Since we make the
6915      * syscall directly, adjust to what is supported by the kernel.
6916      */
6917     if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
6918         ret -= F_GETLK64 - 5;
6919     }
6920 #endif
6921 
6922     return ret;
6923 }
6924 
/*
 * Shared list of flock lock types, expanded twice below with opposite
 * TRANSTBL_CONVERT definitions to generate the two conversion directions.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }
6931 
/*
 * Convert an flock l_type from target to host numbering.
 * Returns -TARGET_EINVAL for unrecognized values.
 */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    return -TARGET_EINVAL;
}
6939 
/* Convert an flock l_type from host to target numbering. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6950 
6951 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6952                                             abi_ulong target_flock_addr)
6953 {
6954     struct target_flock *target_fl;
6955     int l_type;
6956 
6957     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6958         return -TARGET_EFAULT;
6959     }
6960 
6961     __get_user(l_type, &target_fl->l_type);
6962     l_type = target_to_host_flock(l_type);
6963     if (l_type < 0) {
6964         return l_type;
6965     }
6966     fl->l_type = l_type;
6967     __get_user(fl->l_whence, &target_fl->l_whence);
6968     __get_user(fl->l_start, &target_fl->l_start);
6969     __get_user(fl->l_len, &target_fl->l_len);
6970     __get_user(fl->l_pid, &target_fl->l_pid);
6971     unlock_user_struct(target_fl, target_flock_addr, 0);
6972     return 0;
6973 }
6974 
6975 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6976                                           const struct flock64 *fl)
6977 {
6978     struct target_flock *target_fl;
6979     short l_type;
6980 
6981     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6982         return -TARGET_EFAULT;
6983     }
6984 
6985     l_type = host_to_target_flock(fl->l_type);
6986     __put_user(l_type, &target_fl->l_type);
6987     __put_user(fl->l_whence, &target_fl->l_whence);
6988     __put_user(fl->l_start, &target_fl->l_start);
6989     __put_user(fl->l_len, &target_fl->l_len);
6990     __put_user(fl->l_pid, &target_fl->l_pid);
6991     unlock_user_struct(target_fl, target_flock_addr, 1);
6992     return 0;
6993 }
6994 
/* Function types matching the flock64 copy-in/copy-out helpers, so a
 * caller can select an implementation (e.g. the arm OABI variants
 * below) through a function pointer. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6997 
6998 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* flock64 layout used by the arm OABI.  The struct is QEMU_PACKED, so
 * l_start sits directly after l_whence with no alignment padding —
 * presumably the EABI layout pads it to 64-bit alignment, hence the
 * separate definition (TODO confirm against the kernel headers). */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
7006 
7007 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
7008                                                    abi_ulong target_flock_addr)
7009 {
7010     struct target_oabi_flock64 *target_fl;
7011     int l_type;
7012 
7013     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7014         return -TARGET_EFAULT;
7015     }
7016 
7017     __get_user(l_type, &target_fl->l_type);
7018     l_type = target_to_host_flock(l_type);
7019     if (l_type < 0) {
7020         return l_type;
7021     }
7022     fl->l_type = l_type;
7023     __get_user(fl->l_whence, &target_fl->l_whence);
7024     __get_user(fl->l_start, &target_fl->l_start);
7025     __get_user(fl->l_len, &target_fl->l_len);
7026     __get_user(fl->l_pid, &target_fl->l_pid);
7027     unlock_user_struct(target_fl, target_flock_addr, 0);
7028     return 0;
7029 }
7030 
7031 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7032                                                  const struct flock64 *fl)
7033 {
7034     struct target_oabi_flock64 *target_fl;
7035     short l_type;
7036 
7037     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7038         return -TARGET_EFAULT;
7039     }
7040 
7041     l_type = host_to_target_flock(fl->l_type);
7042     __put_user(l_type, &target_fl->l_type);
7043     __put_user(fl->l_whence, &target_fl->l_whence);
7044     __put_user(fl->l_start, &target_fl->l_start);
7045     __put_user(fl->l_len, &target_fl->l_len);
7046     __put_user(fl->l_pid, &target_fl->l_pid);
7047     unlock_user_struct(target_fl, target_flock_addr, 1);
7048     return 0;
7049 }
7050 #endif
7051 
7052 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7053                                               abi_ulong target_flock_addr)
7054 {
7055     struct target_flock64 *target_fl;
7056     int l_type;
7057 
7058     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7059         return -TARGET_EFAULT;
7060     }
7061 
7062     __get_user(l_type, &target_fl->l_type);
7063     l_type = target_to_host_flock(l_type);
7064     if (l_type < 0) {
7065         return l_type;
7066     }
7067     fl->l_type = l_type;
7068     __get_user(fl->l_whence, &target_fl->l_whence);
7069     __get_user(fl->l_start, &target_fl->l_start);
7070     __get_user(fl->l_len, &target_fl->l_len);
7071     __get_user(fl->l_pid, &target_fl->l_pid);
7072     unlock_user_struct(target_fl, target_flock_addr, 0);
7073     return 0;
7074 }
7075 
7076 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7077                                             const struct flock64 *fl)
7078 {
7079     struct target_flock64 *target_fl;
7080     short l_type;
7081 
7082     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7083         return -TARGET_EFAULT;
7084     }
7085 
7086     l_type = host_to_target_flock(fl->l_type);
7087     __put_user(l_type, &target_fl->l_type);
7088     __put_user(fl->l_whence, &target_fl->l_whence);
7089     __put_user(fl->l_start, &target_fl->l_start);
7090     __put_user(fl->l_len, &target_fl->l_len);
7091     __put_user(fl->l_pid, &target_fl->l_pid);
7092     unlock_user_struct(target_fl, target_flock_addr, 1);
7093     return 0;
7094 }
7095 
/*
 * Implement the guest fcntl()/fcntl64() commands.  @arg is a guest
 * address for the struct-based commands (flock, f_owner_ex) and a
 * plain scalar for the rest.  Returns the (translated) result or a
 * negative target errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock64 fl64;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    if (host_cmd == -TARGET_EINVAL)
	    return host_cmd;

    switch(cmd) {
    /* 32-bit flock: convert the lock in, query, copy the answer out. */
    case TARGET_F_GETLK:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl64);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* 64-bit flock variants, including open-file-description locks. */
    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl64);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl64, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
        break;

    /* Open-flags bitmask must be translated in each direction. */
    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    /* f_owner_ex is copied field by field with explicit byteswaps. */
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    /* Signal numbers differ between guest and host: translate both ways. */
    case TARGET_F_SETSIG:
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    /* Plain scalar argument: no translation needed. */
    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* Unknown command: pass the guest value straight through. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
7218 
7219 #ifdef USE_UID16
7220 
/* Narrow a 32-bit uid to 16 bits, clamping to the overflow uid 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
7228 
/* Narrow a 32-bit gid to 16 bits, clamping to the overflow gid 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
7236 
/* Widen a 16-bit uid: the 16-bit -1 "no change" sentinel becomes the
 * 32-bit -1 sentinel; any other value passes through unchanged. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
7244 
/* Widen a 16-bit gid: the 16-bit -1 "no change" sentinel becomes the
 * 32-bit -1 sentinel; any other value passes through unchanged. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Byteswap a 16-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a 16-bit id at a guest address. */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7258 
7259 #else /* !USE_UID16 */
/* Without USE_UID16 the guest uses 32-bit ids, so no narrowing,
 * widening or sentinel translation is needed: all four helpers are
 * identities and the swap is a full 32-bit one. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

/* Store a 32-bit id at a guest address. */
#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7282 
7283 #endif /* USE_UID16 */
7284 
7285 /* We must do direct syscalls for setting UID/GID, because we want to
7286  * implement the Linux system call semantics of "change only for this thread",
7287  * not the libc/POSIX semantics of "change for all threads in process".
7288  * (See http://ewontfix.com/17/ for more details.)
7289  * We use the 32-bit version of the syscalls if present; if it is not
7290  * then either the host architecture supports 32-bit UIDs natively with
7291  * the standard syscall, or the 16-bit UID is the best we can do.
7292  */
/* Prefer the explicit 32-bit id syscalls where the host defines them. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers; see the comment above for why we bypass libc. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7318 
/*
 * One-time initialisation: register all thunk struct descriptions and
 * fix up ioctl numbers whose size field depends on the target layout.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

/* Register every struct listed in syscall_types.h with the thunk layer. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            /* Replace the all-ones size with the pointed-to type's
             * size computed in target layout. */
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7363 
7364 #ifdef TARGET_NR_truncate64
7365 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7366                                          abi_long arg2,
7367                                          abi_long arg3,
7368                                          abi_long arg4)
7369 {
7370     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7371         arg2 = arg3;
7372         arg3 = arg4;
7373     }
7374     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7375 }
7376 #endif
7377 
7378 #ifdef TARGET_NR_ftruncate64
7379 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7380                                           abi_long arg2,
7381                                           abi_long arg3,
7382                                           abi_long arg4)
7383 {
7384     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7385         arg2 = arg3;
7386         arg3 = arg4;
7387     }
7388     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7389 }
7390 #endif
7391 
7392 #if defined(TARGET_NR_timer_settime) || \
7393     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7394 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7395                                                  abi_ulong target_addr)
7396 {
7397     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7398                                 offsetof(struct target_itimerspec,
7399                                          it_interval)) ||
7400         target_to_host_timespec(&host_its->it_value, target_addr +
7401                                 offsetof(struct target_itimerspec,
7402                                          it_value))) {
7403         return -TARGET_EFAULT;
7404     }
7405 
7406     return 0;
7407 }
7408 #endif
7409 
7410 #if defined(TARGET_NR_timer_settime64) || \
7411     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7412 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7413                                                    abi_ulong target_addr)
7414 {
7415     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7416                                   offsetof(struct target__kernel_itimerspec,
7417                                            it_interval)) ||
7418         target_to_host_timespec64(&host_its->it_value, target_addr +
7419                                   offsetof(struct target__kernel_itimerspec,
7420                                            it_value))) {
7421         return -TARGET_EFAULT;
7422     }
7423 
7424     return 0;
7425 }
7426 #endif
7427 
7428 #if ((defined(TARGET_NR_timerfd_gettime) || \
7429       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7430       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7431 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7432                                                  struct itimerspec *host_its)
7433 {
7434     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7435                                                        it_interval),
7436                                 &host_its->it_interval) ||
7437         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7438                                                        it_value),
7439                                 &host_its->it_value)) {
7440         return -TARGET_EFAULT;
7441     }
7442     return 0;
7443 }
7444 #endif
7445 
7446 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7447       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7448       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7449 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7450                                                    struct itimerspec *host_its)
7451 {
7452     if (host_to_target_timespec64(target_addr +
7453                                   offsetof(struct target__kernel_itimerspec,
7454                                            it_interval),
7455                                   &host_its->it_interval) ||
7456         host_to_target_timespec64(target_addr +
7457                                   offsetof(struct target__kernel_itimerspec,
7458                                            it_value),
7459                                   &host_its->it_value)) {
7460         return -TARGET_EFAULT;
7461     }
7462     return 0;
7463 }
7464 #endif
7465 
7466 #if defined(TARGET_NR_adjtimex) || \
7467     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Copy a guest struct timex (adjtimex/clock_adjtime argument) into the
 * host representation, field by field with byteswapping.  Returns 0 on
 * success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7502 
7503 static inline abi_long host_to_target_timex(abi_long target_addr,
7504                                             struct timex *host_tx)
7505 {
7506     struct target_timex *target_tx;
7507 
7508     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7509         return -TARGET_EFAULT;
7510     }
7511 
7512     __put_user(host_tx->modes, &target_tx->modes);
7513     __put_user(host_tx->offset, &target_tx->offset);
7514     __put_user(host_tx->freq, &target_tx->freq);
7515     __put_user(host_tx->maxerror, &target_tx->maxerror);
7516     __put_user(host_tx->esterror, &target_tx->esterror);
7517     __put_user(host_tx->status, &target_tx->status);
7518     __put_user(host_tx->constant, &target_tx->constant);
7519     __put_user(host_tx->precision, &target_tx->precision);
7520     __put_user(host_tx->tolerance, &target_tx->tolerance);
7521     __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
7522     __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
7523     __put_user(host_tx->tick, &target_tx->tick);
7524     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7525     __put_user(host_tx->jitter, &target_tx->jitter);
7526     __put_user(host_tx->shift, &target_tx->shift);
7527     __put_user(host_tx->stabil, &target_tx->stabil);
7528     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7529     __put_user(host_tx->calcnt, &target_tx->calcnt);
7530     __put_user(host_tx->errcnt, &target_tx->errcnt);
7531     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7532     __put_user(host_tx->tai, &target_tx->tai);
7533 
7534     unlock_user_struct(target_tx, target_addr, 1);
7535     return 0;
7536 }
7537 #endif
7538 
7539 
7540 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Copy a guest struct __kernel_timex into the host struct timex.
 * The time member is converted first with copy_from_user_timeval64();
 * the remaining fields are then read one by one.  Returns 0 on
 * success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7579 
7580 static inline abi_long host_to_target_timex64(abi_long target_addr,
7581                                               struct timex *host_tx)
7582 {
7583     struct target__kernel_timex *target_tx;
7584 
7585    if (copy_to_user_timeval64(target_addr +
7586                               offsetof(struct target__kernel_timex, time),
7587                               &host_tx->time)) {
7588         return -TARGET_EFAULT;
7589     }
7590 
7591     if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
7592         return -TARGET_EFAULT;
7593     }
7594 
7595     __put_user(host_tx->modes, &target_tx->modes);
7596     __put_user(host_tx->offset, &target_tx->offset);
7597     __put_user(host_tx->freq, &target_tx->freq);
7598     __put_user(host_tx->maxerror, &target_tx->maxerror);
7599     __put_user(host_tx->esterror, &target_tx->esterror);
7600     __put_user(host_tx->status, &target_tx->status);
7601     __put_user(host_tx->constant, &target_tx->constant);
7602     __put_user(host_tx->precision, &target_tx->precision);
7603     __put_user(host_tx->tolerance, &target_tx->tolerance);
7604     __put_user(host_tx->tick, &target_tx->tick);
7605     __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
7606     __put_user(host_tx->jitter, &target_tx->jitter);
7607     __put_user(host_tx->shift, &target_tx->shift);
7608     __put_user(host_tx->stabil, &target_tx->stabil);
7609     __put_user(host_tx->jitcnt, &target_tx->jitcnt);
7610     __put_user(host_tx->calcnt, &target_tx->calcnt);
7611     __put_user(host_tx->errcnt, &target_tx->errcnt);
7612     __put_user(host_tx->stbcnt, &target_tx->stbcnt);
7613     __put_user(host_tx->tai, &target_tx->tai);
7614 
7615     unlock_user_struct(target_tx, target_addr, 1);
7616     return 0;
7617 }
7618 #endif
7619 
/* Hosts without the sigev_notify_thread_id accessor keep the notify
 * tid inside the _sigev_un union. */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif
7623 
/*
 * Copy a guest struct sigevent into the host representation,
 * translating the signal number and byteswapping each field.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): copy-back flag is 1 despite VERIFY_READ — looks
     * unnecessary (nothing is written to target_sevp); confirm before
     * changing. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7649 
7650 #if defined(TARGET_NR_mlockall)
7651 static inline int target_to_host_mlockall_arg(int arg)
7652 {
7653     int result = 0;
7654 
7655     if (arg & TARGET_MCL_CURRENT) {
7656         result |= MCL_CURRENT;
7657     }
7658     if (arg & TARGET_MCL_FUTURE) {
7659         result |= MCL_FUTURE;
7660     }
7661 #ifdef MCL_ONFAULT
7662     if (arg & TARGET_MCL_ONFAULT) {
7663         result |= MCL_ONFAULT;
7664     }
7665 #endif
7666 
7667     return result;
7668 }
7669 #endif
7670 
7671 static inline int target_to_host_msync_arg(abi_long arg)
7672 {
7673     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7674            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7675            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7676            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7677 }
7678 
7679 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7680      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7681      defined(TARGET_NR_newfstatat))
/*
 * Write @host_st out to the guest's 64-bit stat buffer at
 * @target_addr, in whichever of the three possible layouts the target
 * expects (arm EABI, target_stat64, or plain target_stat).  Returns 0
 * on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        /* arm EABI guests use their own stat64 layout. */
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Targets with a broken st_ino also get the value in __st_ino. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps, when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7754 #endif
7755 
7756 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result to the guest buffer at @target_addr.  The host
 * result is already held in a struct target_statx; this byteswaps each
 * field into guest order (fields not listed stay zero from the
 * memset).  Returns 0 on success or -TARGET_EFAULT on a bad address.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7795 #endif
7796 
/*
 * Invoke the raw host futex syscall, selecting between the plain and
 * _time64 variants.
 *
 * On 64-bit hosts time_t is always 64 bits and only __NR_futex exists.
 * On 32-bit hosts, use __NR_futex_time64 when the host struct timespec
 * has a 64-bit tv_sec, otherwise fall back to the legacy __NR_futex.
 * If the build provides neither syscall number, this is unreachable.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    g_assert_not_reached();
}
7821 
/*
 * Like do_sys_futex(), but through the safe_syscall wrappers so that a
 * guest signal arriving while blocked is taken correctly, and with the
 * host errno converted to a -TARGET_* error code via get_errno().
 * Returns -TARGET_ENOSYS when no usable futex syscall exists.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7846 
/* ??? Using host futex calls even when target atomic operations
   are not really atomic probably breaks things.  However implementing
   futexes locally would make futexes shared between multiple processes
   tricky.  However they're probably useless because guest atomic
   operations won't work either.  */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Implement the guest futex/futex_time64 syscalls on top of the host
 * futex call.
 *
 * cpu     - current CPU, for guest<->host address translation (g2h)
 * time64  - true for futex_time64, i.e. a 64-bit guest timespec
 * uaddr   - guest address of the futex word
 * op      - futex operation (possibly with flag bits above CMD_MASK)
 * val     - swapped to guest order where the kernel compares it against
 *           guest memory (WAIT variants)
 * timeout - guest timespec address, or the raw VAL2 for requeue ops
 * uaddr2  - guest address of the second futex word, where used
 * val3    - swapped for the CMP_REQUEUE comparisons
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* The kernel compares VAL against the guest futex word: swap it. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        /* These take no timeout; ignore whatever the guest passed. */
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    /* Convert a real (non-VAL2) timeout from the guest, if supplied. */
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
7919 
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(2) for the guest.
 *
 * The guest struct file_handle begins with a 32-bit handle_bytes field
 * giving the space available for the opaque handle data.  We read it,
 * run the host syscall into a bounce buffer, and copy the result back
 * with the two leading 32-bit fields swapped to guest byte order.  The
 * mount id is stored to guest address mount_id as a 32-bit value.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* Guest-supplied handle_bytes: capacity of the opaque handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host bounce buffer; only handle_bytes needs initializing. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
#endif
7973 
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(2) for the guest: rebuild a host struct
 * file_handle from the guest's copy (swapping handle_bytes and
 * handle_type to host byte order), convert the open flags, and run the
 * host syscall.  Returns the new fd or a -TARGET_* error code.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* Guest-supplied handle_bytes: length of the opaque handle data. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Copy, then fix up the header fields for the host. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
8007 
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)

/*
 * Common implementation of signalfd and signalfd4.
 *
 * Converts the guest signal mask and flags to host form, creates the
 * signalfd, and registers an fd translator so that signalfd_siginfo
 * records read from it are converted back to guest layout.
 * Returns the new fd or a -TARGET_* error code.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only the nonblock and cloexec flags are valid for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need host->guest siginfo conversion. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
8038 
/*
 * Map host to target signal numbers inside a wait-family status word.
 * Only the signal bits are translated; all other status bits are
 * assumed identical between host and target.
 */
int host_to_target_waitstatus(int status)
{
    int result = status;

    if (WIFSIGNALED(status)) {
        /* Terminating signal occupies the low 7 bits. */
        int tsig = host_to_target_signal(WTERMSIG(status));
        result = tsig | (status & ~0x7f);
    } else if (WIFSTOPPED(status)) {
        /* Stop signal is reported in bits 8-15. */
        int ssig = host_to_target_signal(WSTOPSIG(status));
        result = (ssig << 8) | (status & 0xff);
    }
    return result;
}
8052 
/*
 * Emulate /proc/self/cmdline: write each saved argv[] string to fd,
 * each followed by its NUL terminator, matching the kernel's format.
 * Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_cmdline(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
    int i;

    for (i = 0; i < bprm->argc; i++) {
        /* +1 so the NUL separator is written along with the string. */
        size_t len = strlen(bprm->argv[i]) + 1;

        if (write(fd, bprm->argv[i], len) != len) {
            return -1;
        }
    }

    return 0;
}
8069 
8070 static void show_smaps(int fd, unsigned long size)
8071 {
8072     unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
8073     unsigned long size_kb = size >> 10;
8074 
8075     dprintf(fd, "Size:                  %lu kB\n"
8076                 "KernelPageSize:        %lu kB\n"
8077                 "MMUPageSize:           %lu kB\n"
8078                 "Rss:                   0 kB\n"
8079                 "Pss:                   0 kB\n"
8080                 "Pss_Dirty:             0 kB\n"
8081                 "Shared_Clean:          0 kB\n"
8082                 "Shared_Dirty:          0 kB\n"
8083                 "Private_Clean:         0 kB\n"
8084                 "Private_Dirty:         0 kB\n"
8085                 "Referenced:            0 kB\n"
8086                 "Anonymous:             0 kB\n"
8087                 "LazyFree:              0 kB\n"
8088                 "AnonHugePages:         0 kB\n"
8089                 "ShmemPmdMapped:        0 kB\n"
8090                 "FilePmdMapped:         0 kB\n"
8091                 "Shared_Hugetlb:        0 kB\n"
8092                 "Private_Hugetlb:       0 kB\n"
8093                 "Swap:                  0 kB\n"
8094                 "SwapPss:               0 kB\n"
8095                 "Locked:                0 kB\n"
8096                 "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
8097 }
8098 
/*
 * Emit the body of /proc/self/maps (and, when smaps is true, also the
 * per-mapping detail of /proc/self/smaps) to fd.  The content is
 * synthesized from the host's own mappings, restricted to ranges that
 * are valid guest addresses, with guest page protections reported.
 */
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to one past the last valid guest address. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            if (!page_check_range(h2g(min), max - min, flags)) {
                continue;
            }

/* On HPPA the stack grows upward, so it is identified by its top. */
#ifdef TARGET_HPPA
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the path column lines up as in the kernel. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
8180 
/* Emulate /proc/self/maps (plain format, no smaps detail stanzas). */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8185 
/* Emulate /proc/self/smaps (maps plus per-mapping detail stanzas). */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8190 
/*
 * Emulate /proc/self/stat.  Only the fields QEMU can supply are real:
 * pid (1), comm (2), state (3), ppid (4), starttime (22) and
 * startstack (28); the remaining fields up to the 44th are written as
 * 0.  Returns 0 on success, -1 on a short or failed write.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            /* comm is truncated to 15 characters, like the kernel does. */
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 2) {
            /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
8231 
/*
 * Emulate /proc/self/auxv: copy the auxiliary vector that was placed
 * on the guest stack at exec time out to fd, then rewind the fd so the
 * reader sees it from offset 0.  Always returns 0; an unmappable auxv
 * simply yields an empty file.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced by the loop, so this
         * unlocks with the post-loop values rather than the originals;
         * looks harmless for a read-only lock — confirm.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8261 
/*
 * Return nonzero if @filename names the given /proc entry of the
 * current process, i.e. it is exactly "/proc/self/<entry>" or
 * "/proc/<pid>/<entry>" where <pid> is our own process id.
 */
static int is_proc_myself(const char *filename, const char *entry)
{
    static const char proc_prefix[] = "/proc/";
    const char *rest = filename;
    char pid_prefix[80];

    if (strncmp(rest, proc_prefix, strlen(proc_prefix)) != 0) {
        return 0;
    }
    rest += strlen(proc_prefix);

    if (strncmp(rest, "self/", strlen("self/")) == 0) {
        rest += strlen("self/");
    } else if (*rest >= '1' && *rest <= '9') {
        /* Accept our own numeric pid as an alias for "self". */
        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(rest, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        rest += strlen(pid_prefix);
    } else {
        return 0;
    }

    /* The remainder must match the requested entry exactly. */
    return strcmp(rest, entry) == 0;
}
8285 
8286 static void excp_dump_file(FILE *logfile, CPUArchState *env,
8287                       const char *fmt, int code)
8288 {
8289     if (logfile) {
8290         CPUState *cs = env_cpu(env);
8291 
8292         fprintf(logfile, fmt, code);
8293         fprintf(logfile, "Failing executable: %s\n", exec_path);
8294         cpu_dump_state(cs, logfile, 0);
8295         open_self_maps(env, fileno(logfile));
8296     }
8297 }
8298 
/*
 * Dump a fatal guest exception report: first to stderr, then to the
 * QEMU log file when one is configured separately from the console.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8312 
8313 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8314     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8315     defined(TARGET_RISCV) || defined(TARGET_S390X)
/* True when @filename is exactly the emulated /proc path @entry. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8320 #endif
8321 
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
/*
 * Present the host's /proc/net/route to the guest with the 32-bit
 * address columns (destination, gateway, mask) byte-swapped, since
 * guest and host endianness differ.
 *
 * Returns 0 on success, -1 if /proc/net/route cannot be opened.
 */
static int open_net_route(CPUArchState *cpu_env, int fd)
{
    FILE *fp;
    char *line = NULL;
    size_t len = 0;

    fp = fopen("/proc/net/route", "r");
    if (fp == NULL) {
        return -1;
    }

    /*
     * Copy the header line through unchanged.  Guard against an empty
     * file: on getline() failure `line` may still be NULL, and passing
     * that to "%s" would be undefined behaviour.
     */
    if (getline(&line, &len, fp) != -1) {
        dprintf(fd, "%s", line);
    }

    /* Swap the address columns of each route entry. */
    while (getline(&line, &len, fp) != -1) {
        char iface[16];
        uint32_t dest, gw, mask;
        unsigned int flags, refcnt, use, metric, mtu, window, irtt;
        int fields;

        /* %15s bounds the interface name to iface[16] including NUL. */
        fields = sscanf(line,
                        "%15s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                        iface, &dest, &gw, &flags, &refcnt, &use, &metric,
                        &mask, &mtu, &window, &irtt);
        if (fields != 11) {
            /* Skip anything that does not parse as a route line. */
            continue;
        }
        dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
                iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
                metric, tswap32(mask), mtu, window, irtt);
    }

    free(line);
    fclose(fp);

    return 0;
}
#endif
8366 
#if defined(TARGET_SPARC)
/* Emulate /proc/cpuinfo for SPARC: report a fixed sun4u machine type. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
#endif
8374 
#if defined(TARGET_HPPA)
/*
 * Emulate /proc/cpuinfo for HPPA: one fixed PA7300LC stanza for every
 * online host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int cpu, online_cpus;

    online_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    for (cpu = 0; cpu < online_cpus; cpu++) {
        dprintf(fd,
                "processor\t: %d\n"
                "cpu family\t: PA-RISC 1.1e\n"
                "cpu\t\t: PA7300LC (PCX-L2)\n"
                "capabilities\t: os32\n"
                "model\t\t: 9000/778/B160L - "
                "Merlin L2 160 QEMU (9000/778/B160L)\n\n", cpu);
    }
    return 0;
}
#endif
8392 
#if defined(TARGET_RISCV)
/*
 * Emulate /proc/cpuinfo for RISC-V: one stanza per online host CPU,
 * reporting the guest CPU's ISA string and MMU type (sv32 for RV32,
 * sv48 for RV64 when an MMU is configured, "none" otherwise).
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    if (cfg->mmu) {
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    /* riscv_isa_string() allocates; release it once. */
    g_free(isa_string);
    return 0;
}
#endif
8421 
8422 #if defined(TARGET_S390X)
8423 /*
8424  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8425  * show in /proc/cpuinfo.
8426  *
8427  * Skip the following in order to match the missing support in op_ecag():
8428  * - show_cacheinfo().
8429  * - show_cpu_topology().
8430  * - show_cpu_mhz().
8431  *
8432  * Use fixed values for certain fields:
8433  * - bogomips per cpu - from a qemu-system-s390x run.
8434  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8435  *
8436  * Keep the code structure close to arch/s390/kernel/processor.c.
8437  */
8438 
/*
 * Print the "facilities" line of /proc/cpuinfo: the numbers of all
 * STFL(E) facility bits set for the configured CPU model.
 */
static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        /* Facility bits are numbered big-endian within the block. */
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}
8454 
/*
 * Build the CPU identification number for cpu @n: the physical-address
 * bits of the ID field hold the cpu number, the remaining bits are 0.
 */
static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}
8460 
/*
 * Print the leading summary section of the s390x /proc/cpuinfo:
 * vendor, cpu count, fixed bogomips, hwcap feature names, facility
 * bits, and one version/identification/machine line per processor.
 */
static void show_cpu_summary(CPUArchState *cpu_env, int fd)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    uint32_t elf_hwcap = get_elf_hwcap();
    const char *hwcap_str;
    int i;

    dprintf(fd, "vendor_id       : IBM/S390\n"
                "# processors    : %i\n"
                "bogomips per cpu: 13370.00\n",
            num_cpus);
    /* SMT / SIGP_SET_MULTI_THREADING is not supported, so always 0. */
    dprintf(fd, "max thread id   : 0\n");
    dprintf(fd, "features\t: ");
    for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
        if (!(elf_hwcap & (1 << i))) {
            continue;
        }
        /* Only bits with a known name are printed. */
        hwcap_str = elf_hwcap_str(i);
        if (hwcap_str) {
            dprintf(fd, "%s ", hwcap_str);
        }
    }
    dprintf(fd, "\n");
    show_facilities(fd);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor %d: "
               "version = %02X,  "
               "identification = %06X,  "
               "machine = %04X\n",
               i, model->cpu_ver, cpu_ident(i), model->def->type);
    }
}
8494 
/* Print the version/identification/machine lines for cpu @n. */
static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}
8503 
/* Print the per-cpu stanza (cpu number plus ids) for cpu @n. */
static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}
8509 
/*
 * Emulate /proc/cpuinfo for s390x: summary section followed by one
 * stanza per online host CPU.  Always returns 0.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
8521 #endif
8522 
#if defined(TARGET_M68K)
/* Emulate /proc/hardware for m68k: report a fixed QEMU model name. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
#endif
8530 
/*
 * Common implementation of the guest open/openat family.
 *
 * /proc/self/exe (and /proc/<our-pid>/exe) is redirected to the real
 * executable path.  A table of emulated /proc files is consulted next:
 * when one matches, its contents are generated into an anonymous memfd
 * (or an unlinked temp file if memfd_create is unavailable) and that
 * fd is returned.  Everything else is passed through to the host with
 * the path() translation applied.  @safe selects the safe_syscall
 * wrapper so pending guest signals are honoured.
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    /* Table of emulated /proc files; terminated by a NULL filename. */
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            /* Only fall back when the kernel lacks memfd_create. */
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            /* Unlink immediately; the open fd keeps the file alive. */
            unlink(filename);
        }

        if ((r = fake_open->fill(cpu_env, fd))) {
            /* Preserve the fill function's errno across close(). */
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads the generated content from 0. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
8612 
8613 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8614 {
8615     ssize_t ret;
8616 
8617     if (!pathname || !buf) {
8618         errno = EFAULT;
8619         return -1;
8620     }
8621 
8622     if (!bufsiz) {
8623         /* Short circuit this for the magic exe check. */
8624         errno = EINVAL;
8625         return -1;
8626     }
8627 
8628     if (is_proc_myself((const char *)pathname, "exe")) {
8629         /*
8630          * Don't worry about sign mismatch as earlier mapping
8631          * logic would have thrown a bad address error.
8632          */
8633         ret = MIN(strlen(exec_path), bufsiz);
8634         /* We cannot NUL terminate the string. */
8635         memcpy(buf, exec_path, ret);
8636     } else {
8637         ret = readlink(path(pathname), buf, bufsiz);
8638     }
8639 
8640     return ret;
8641 }
8642 
/*
 * Common implementation for execve() and execveat().
 *
 * pathname, guest_argp and guest_envp are guest addresses; the latter
 * two point to NULL-terminated arrays of guest string pointers.  dirfd
 * and flags are only meaningful when is_execveat is true.  On success
 * the host execve() replaces the process image and does not return;
 * otherwise a -TARGET_* errno is returned.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* First pass: count argv entries up to the NULL terminator. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Same counting pass for the environment array. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    /* Host arrays, zero-filled so unfilled slots act as terminators. */
    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Second pass: lock each guest string into host memory. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* Redirect /proc/self/exe (and variants) to the real guest binary. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /*
     * Unlock every string we locked, re-walking the guest arrays so each
     * host pointer is paired with its original guest address.
     */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8758 
8759 #define TIMER_MAGIC 0x0caf0000
8760 #define TIMER_MAGIC_MASK 0xffff0000
8761 
8762 /* Convert QEMU provided timer ID back to internal 16bit index format */
8763 static target_timer_t get_timer_id(abi_long arg)
8764 {
8765     target_timer_t timerid = arg;
8766 
8767     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8768         return -TARGET_EINVAL;
8769     }
8770 
8771     timerid &= 0xffff;
8772 
8773     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8774         return -TARGET_EINVAL;
8775     }
8776 
8777     return timerid;
8778 }
8779 
8780 static int target_to_host_cpu_mask(unsigned long *host_mask,
8781                                    size_t host_size,
8782                                    abi_ulong target_addr,
8783                                    size_t target_size)
8784 {
8785     unsigned target_bits = sizeof(abi_ulong) * 8;
8786     unsigned host_bits = sizeof(*host_mask) * 8;
8787     abi_ulong *target_mask;
8788     unsigned i, j;
8789 
8790     assert(host_size >= target_size);
8791 
8792     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8793     if (!target_mask) {
8794         return -TARGET_EFAULT;
8795     }
8796     memset(host_mask, 0, host_size);
8797 
8798     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8799         unsigned bit = i * target_bits;
8800         abi_ulong val;
8801 
8802         __get_user(val, &target_mask[i]);
8803         for (j = 0; j < target_bits; j++, bit++) {
8804             if (val & (1UL << j)) {
8805                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8806             }
8807         }
8808     }
8809 
8810     unlock_user(target_mask, target_addr, 0);
8811     return 0;
8812 }
8813 
8814 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8815                                    size_t host_size,
8816                                    abi_ulong target_addr,
8817                                    size_t target_size)
8818 {
8819     unsigned target_bits = sizeof(abi_ulong) * 8;
8820     unsigned host_bits = sizeof(*host_mask) * 8;
8821     abi_ulong *target_mask;
8822     unsigned i, j;
8823 
8824     assert(host_size >= target_size);
8825 
8826     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8827     if (!target_mask) {
8828         return -TARGET_EFAULT;
8829     }
8830 
8831     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8832         unsigned bit = i * target_bits;
8833         abi_ulong val = 0;
8834 
8835         for (j = 0; j < target_bits; j++, bit++) {
8836             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8837                 val |= 1UL << j;
8838             }
8839         }
8840         __put_user(val, &target_mask[i]);
8841     }
8842 
8843     unlock_user(target_mask, target_addr, target_size);
8844     return 0;
8845 }
8846 
#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(2): fetch host directory entries into a scratch
 * buffer, then repack each record into the guest's struct target_dirent
 * layout in the guest buffer at arg2 (at most count bytes).
 * Returns the number of bytes stored in the guest buffer, or a
 * -TARGET_* errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;    /* host buffer length, host/target offsets */
    int hreclen, treclen;    /* per-record lengths, host and target */
    off64_t prev_diroff = 0; /* d_off of the last record copied out */

    /* Host-side scratch buffer, same size as the guest's. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records, appending a converted record for each one. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: the name's NUL terminator plus the trailing d_type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
8933 
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(2): fetch host linux_dirent64 records and repack
 * them into the guest's struct target_dirent64 layout in the guest
 * buffer at arg2 (at most count bytes).
 * Returns the number of bytes stored in the guest buffer, or a
 * -TARGET_* errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;    /* host buffer length, host/target offsets */
    int hreclen, treclen;    /* per-record lengths, host and target */
    off64_t prev_diroff = 0; /* d_off of the last record copied out */

    /* Host-side scratch buffer, same size as the guest's. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Walk host records, appending a converted record for each one. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the NUL terminator here. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
8999 
#if defined(TARGET_NR_riscv_hwprobe)

/*
 * Key/value constants for the RISC-V hwprobe syscall; these mirror the
 * kernel's asm/hwprobe.h definitions.
 */
#define RISCV_HWPROBE_KEY_MVENDORID     0
#define RISCV_HWPROBE_KEY_MARCHID       1
#define RISCV_HWPROBE_KEY_MIMPID        2

#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)

#define RISCV_HWPROBE_KEY_IMA_EXT_0     4
#define     RISCV_HWPROBE_IMA_FD       (1 << 0)
#define     RISCV_HWPROBE_IMA_C        (1 << 1)

#define RISCV_HWPROBE_KEY_CPUPERF_0     5
#define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
#define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
#define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
#define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
#define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)

/* Guest-layout key/value pair as passed to riscv_hwprobe(2). */
struct riscv_hwprobe {
    abi_llong  key;
    abi_ullong value;
};
9025 
9026 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9027                                     struct riscv_hwprobe *pair,
9028                                     size_t pair_count)
9029 {
9030     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9031 
9032     for (; pair_count > 0; pair_count--, pair++) {
9033         abi_llong key;
9034         abi_ullong value;
9035         __put_user(0, &pair->value);
9036         __get_user(key, &pair->key);
9037         switch (key) {
9038         case RISCV_HWPROBE_KEY_MVENDORID:
9039             __put_user(cfg->mvendorid, &pair->value);
9040             break;
9041         case RISCV_HWPROBE_KEY_MARCHID:
9042             __put_user(cfg->marchid, &pair->value);
9043             break;
9044         case RISCV_HWPROBE_KEY_MIMPID:
9045             __put_user(cfg->mimpid, &pair->value);
9046             break;
9047         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9048             value = riscv_has_ext(env, RVI) &&
9049                     riscv_has_ext(env, RVM) &&
9050                     riscv_has_ext(env, RVA) ?
9051                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9052             __put_user(value, &pair->value);
9053             break;
9054         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9055             value = riscv_has_ext(env, RVF) &&
9056                     riscv_has_ext(env, RVD) ?
9057                     RISCV_HWPROBE_IMA_FD : 0;
9058             value |= riscv_has_ext(env, RVC) ?
9059                      RISCV_HWPROBE_IMA_C : pair->value;
9060             __put_user(value, &pair->value);
9061             break;
9062         case RISCV_HWPROBE_KEY_CPUPERF_0:
9063             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9064             break;
9065         default:
9066             __put_user(-1, &pair->key);
9067             break;
9068         }
9069     }
9070 }
9071 
9072 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9073 {
9074     int ret, i, tmp;
9075     size_t host_mask_size, target_mask_size;
9076     unsigned long *host_mask;
9077 
9078     /*
9079      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9080      * arg3 contains the cpu count.
9081      */
9082     tmp = (8 * sizeof(abi_ulong));
9083     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9084     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9085                      ~(sizeof(*host_mask) - 1);
9086 
9087     host_mask = alloca(host_mask_size);
9088 
9089     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9090                                   arg4, target_mask_size);
9091     if (ret != 0) {
9092         return ret;
9093     }
9094 
9095     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9096         if (host_mask[i] != 0) {
9097             return 0;
9098         }
9099     }
9100     return -TARGET_EINVAL;
9101 }
9102 
9103 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9104                                  abi_long arg2, abi_long arg3,
9105                                  abi_long arg4, abi_long arg5)
9106 {
9107     int ret;
9108     struct riscv_hwprobe *host_pairs;
9109 
9110     /* flags must be 0 */
9111     if (arg5 != 0) {
9112         return -TARGET_EINVAL;
9113     }
9114 
9115     /* check cpu_set */
9116     if (arg3 != 0) {
9117         ret = cpu_set_valid(arg3, arg4);
9118         if (ret != 0) {
9119             return ret;
9120         }
9121     } else if (arg4 != 0) {
9122         return -TARGET_EINVAL;
9123     }
9124 
9125     /* no pairs */
9126     if (arg2 == 0) {
9127         return 0;
9128     }
9129 
9130     host_pairs = lock_user(VERIFY_WRITE, arg1,
9131                            sizeof(*host_pairs) * (size_t)arg2, 0);
9132     if (host_pairs == NULL) {
9133         return -TARGET_EFAULT;
9134     }
9135     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9136     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9137     return 0;
9138 }
9139 #endif /* TARGET_NR_riscv_hwprobe */
9140 
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
/* Direct wrapper for the host pivot_root(2) syscall. */
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
9144 
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
/* Direct wrapper for the host open_tree(2) syscall. */
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif
9150 
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
/* Direct wrapper for the host move_mount(2) syscall. */
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
           int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
9156 
9157 /* This is an internal helper for do_syscall so that it is easier
9158  * to have a single return point, so that actions, such as logging
9159  * of syscall results, can be performed.
9160  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9161  */
9162 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9163                             abi_long arg2, abi_long arg3, abi_long arg4,
9164                             abi_long arg5, abi_long arg6, abi_long arg7,
9165                             abi_long arg8)
9166 {
9167     CPUState *cpu = env_cpu(cpu_env);
9168     abi_long ret;
9169 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9170     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9171     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9172     || defined(TARGET_NR_statx)
9173     struct stat st;
9174 #endif
9175 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9176     || defined(TARGET_NR_fstatfs)
9177     struct statfs stfs;
9178 #endif
9179     void *p;
9180 
9181     switch(num) {
9182     case TARGET_NR_exit:
9183         /* In old applications this may be used to implement _exit(2).
9184            However in threaded applications it is used for thread termination,
9185            and _exit_group is used for application termination.
9186            Do thread termination if we have more then one thread.  */
9187 
9188         if (block_signals()) {
9189             return -QEMU_ERESTARTSYS;
9190         }
9191 
9192         pthread_mutex_lock(&clone_lock);
9193 
9194         if (CPU_NEXT(first_cpu)) {
9195             TaskState *ts = cpu->opaque;
9196 
9197             if (ts->child_tidptr) {
9198                 put_user_u32(0, ts->child_tidptr);
9199                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9200                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9201             }
9202 
9203             object_unparent(OBJECT(cpu));
9204             object_unref(OBJECT(cpu));
9205             /*
9206              * At this point the CPU should be unrealized and removed
9207              * from cpu lists. We can clean-up the rest of the thread
9208              * data without the lock held.
9209              */
9210 
9211             pthread_mutex_unlock(&clone_lock);
9212 
9213             thread_cpu = NULL;
9214             g_free(ts);
9215             rcu_unregister_thread();
9216             pthread_exit(NULL);
9217         }
9218 
9219         pthread_mutex_unlock(&clone_lock);
9220         preexit_cleanup(cpu_env, arg1);
9221         _exit(arg1);
9222         return 0; /* avoid warning */
9223     case TARGET_NR_read:
9224         if (arg2 == 0 && arg3 == 0) {
9225             return get_errno(safe_read(arg1, 0, 0));
9226         } else {
9227             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9228                 return -TARGET_EFAULT;
9229             ret = get_errno(safe_read(arg1, p, arg3));
9230             if (ret >= 0 &&
9231                 fd_trans_host_to_target_data(arg1)) {
9232                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9233             }
9234             unlock_user(p, arg2, ret);
9235         }
9236         return ret;
9237     case TARGET_NR_write:
9238         if (arg2 == 0 && arg3 == 0) {
9239             return get_errno(safe_write(arg1, 0, 0));
9240         }
9241         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9242             return -TARGET_EFAULT;
9243         if (fd_trans_target_to_host_data(arg1)) {
9244             void *copy = g_malloc(arg3);
9245             memcpy(copy, p, arg3);
9246             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9247             if (ret >= 0) {
9248                 ret = get_errno(safe_write(arg1, copy, ret));
9249             }
9250             g_free(copy);
9251         } else {
9252             ret = get_errno(safe_write(arg1, p, arg3));
9253         }
9254         unlock_user(p, arg2, 0);
9255         return ret;
9256 
9257 #ifdef TARGET_NR_open
9258     case TARGET_NR_open:
9259         if (!(p = lock_user_string(arg1)))
9260             return -TARGET_EFAULT;
9261         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9262                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9263                                   arg3, true));
9264         fd_trans_unregister(ret);
9265         unlock_user(p, arg1, 0);
9266         return ret;
9267 #endif
9268     case TARGET_NR_openat:
9269         if (!(p = lock_user_string(arg2)))
9270             return -TARGET_EFAULT;
9271         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9272                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9273                                   arg4, true));
9274         fd_trans_unregister(ret);
9275         unlock_user(p, arg2, 0);
9276         return ret;
9277 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9278     case TARGET_NR_name_to_handle_at:
9279         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9280         return ret;
9281 #endif
9282 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9283     case TARGET_NR_open_by_handle_at:
9284         ret = do_open_by_handle_at(arg1, arg2, arg3);
9285         fd_trans_unregister(ret);
9286         return ret;
9287 #endif
9288 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9289     case TARGET_NR_pidfd_open:
9290         return get_errno(pidfd_open(arg1, arg2));
9291 #endif
9292 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9293     case TARGET_NR_pidfd_send_signal:
9294         {
9295             siginfo_t uinfo, *puinfo;
9296 
9297             if (arg3) {
9298                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9299                 if (!p) {
9300                     return -TARGET_EFAULT;
9301                  }
9302                  target_to_host_siginfo(&uinfo, p);
9303                  unlock_user(p, arg3, 0);
9304                  puinfo = &uinfo;
9305             } else {
9306                  puinfo = NULL;
9307             }
9308             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9309                                               puinfo, arg4));
9310         }
9311         return ret;
9312 #endif
9313 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9314     case TARGET_NR_pidfd_getfd:
9315         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9316 #endif
9317     case TARGET_NR_close:
9318         fd_trans_unregister(arg1);
9319         return get_errno(close(arg1));
9320 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9321     case TARGET_NR_close_range:
9322         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9323         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9324             abi_long fd, maxfd;
9325             maxfd = MIN(arg2, target_fd_max);
9326             for (fd = arg1; fd < maxfd; fd++) {
9327                 fd_trans_unregister(fd);
9328             }
9329         }
9330         return ret;
9331 #endif
9332 
9333     case TARGET_NR_brk:
9334         return do_brk(arg1);
9335 #ifdef TARGET_NR_fork
9336     case TARGET_NR_fork:
9337         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9338 #endif
9339 #ifdef TARGET_NR_waitpid
9340     case TARGET_NR_waitpid:
9341         {
9342             int status;
9343             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9344             if (!is_error(ret) && arg2 && ret
9345                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9346                 return -TARGET_EFAULT;
9347         }
9348         return ret;
9349 #endif
9350 #ifdef TARGET_NR_waitid
9351     case TARGET_NR_waitid:
9352         {
9353             siginfo_t info;
9354             info.si_pid = 0;
9355             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9356             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9357                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9358                     return -TARGET_EFAULT;
9359                 host_to_target_siginfo(p, &info);
9360                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9361             }
9362         }
9363         return ret;
9364 #endif
9365 #ifdef TARGET_NR_creat /* not on alpha */
9366     case TARGET_NR_creat:
9367         if (!(p = lock_user_string(arg1)))
9368             return -TARGET_EFAULT;
9369         ret = get_errno(creat(p, arg2));
9370         fd_trans_unregister(ret);
9371         unlock_user(p, arg1, 0);
9372         return ret;
9373 #endif
9374 #ifdef TARGET_NR_link
9375     case TARGET_NR_link:
9376         {
9377             void * p2;
9378             p = lock_user_string(arg1);
9379             p2 = lock_user_string(arg2);
9380             if (!p || !p2)
9381                 ret = -TARGET_EFAULT;
9382             else
9383                 ret = get_errno(link(p, p2));
9384             unlock_user(p2, arg2, 0);
9385             unlock_user(p, arg1, 0);
9386         }
9387         return ret;
9388 #endif
9389 #if defined(TARGET_NR_linkat)
9390     case TARGET_NR_linkat:
9391         {
9392             void * p2 = NULL;
9393             if (!arg2 || !arg4)
9394                 return -TARGET_EFAULT;
9395             p  = lock_user_string(arg2);
9396             p2 = lock_user_string(arg4);
9397             if (!p || !p2)
9398                 ret = -TARGET_EFAULT;
9399             else
9400                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9401             unlock_user(p, arg2, 0);
9402             unlock_user(p2, arg4, 0);
9403         }
9404         return ret;
9405 #endif
9406 #ifdef TARGET_NR_unlink
9407     case TARGET_NR_unlink:
9408         if (!(p = lock_user_string(arg1)))
9409             return -TARGET_EFAULT;
9410         ret = get_errno(unlink(p));
9411         unlock_user(p, arg1, 0);
9412         return ret;
9413 #endif
9414 #if defined(TARGET_NR_unlinkat)
9415     case TARGET_NR_unlinkat:
9416         if (!(p = lock_user_string(arg2)))
9417             return -TARGET_EFAULT;
9418         ret = get_errno(unlinkat(arg1, p, arg3));
9419         unlock_user(p, arg2, 0);
9420         return ret;
9421 #endif
9422     case TARGET_NR_execveat:
9423         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9424     case TARGET_NR_execve:
9425         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9426     case TARGET_NR_chdir:
9427         if (!(p = lock_user_string(arg1)))
9428             return -TARGET_EFAULT;
9429         ret = get_errno(chdir(p));
9430         unlock_user(p, arg1, 0);
9431         return ret;
9432 #ifdef TARGET_NR_time
9433     case TARGET_NR_time:
9434         {
9435             time_t host_time;
9436             ret = get_errno(time(&host_time));
9437             if (!is_error(ret)
9438                 && arg1
9439                 && put_user_sal(host_time, arg1))
9440                 return -TARGET_EFAULT;
9441         }
9442         return ret;
9443 #endif
9444 #ifdef TARGET_NR_mknod
9445     case TARGET_NR_mknod:
9446         if (!(p = lock_user_string(arg1)))
9447             return -TARGET_EFAULT;
9448         ret = get_errno(mknod(p, arg2, arg3));
9449         unlock_user(p, arg1, 0);
9450         return ret;
9451 #endif
9452 #if defined(TARGET_NR_mknodat)
9453     case TARGET_NR_mknodat:
9454         if (!(p = lock_user_string(arg2)))
9455             return -TARGET_EFAULT;
9456         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9457         unlock_user(p, arg2, 0);
9458         return ret;
9459 #endif
9460 #ifdef TARGET_NR_chmod
9461     case TARGET_NR_chmod:
9462         if (!(p = lock_user_string(arg1)))
9463             return -TARGET_EFAULT;
9464         ret = get_errno(chmod(p, arg2));
9465         unlock_user(p, arg1, 0);
9466         return ret;
9467 #endif
9468 #ifdef TARGET_NR_lseek
9469     case TARGET_NR_lseek:
9470         return get_errno(lseek(arg1, arg2, arg3));
9471 #endif
9472 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9473     /* Alpha specific */
9474     case TARGET_NR_getxpid:
9475         cpu_env->ir[IR_A4] = getppid();
9476         return get_errno(getpid());
9477 #endif
9478 #ifdef TARGET_NR_getpid
9479     case TARGET_NR_getpid:
9480         return get_errno(getpid());
9481 #endif
9482     case TARGET_NR_mount:
9483         {
9484             /* need to look at the data field */
9485             void *p2, *p3;
9486 
9487             if (arg1) {
9488                 p = lock_user_string(arg1);
9489                 if (!p) {
9490                     return -TARGET_EFAULT;
9491                 }
9492             } else {
9493                 p = NULL;
9494             }
9495 
9496             p2 = lock_user_string(arg2);
9497             if (!p2) {
9498                 if (arg1) {
9499                     unlock_user(p, arg1, 0);
9500                 }
9501                 return -TARGET_EFAULT;
9502             }
9503 
9504             if (arg3) {
9505                 p3 = lock_user_string(arg3);
9506                 if (!p3) {
9507                     if (arg1) {
9508                         unlock_user(p, arg1, 0);
9509                     }
9510                     unlock_user(p2, arg2, 0);
9511                     return -TARGET_EFAULT;
9512                 }
9513             } else {
9514                 p3 = NULL;
9515             }
9516 
9517             /* FIXME - arg5 should be locked, but it isn't clear how to
9518              * do that since it's not guaranteed to be a NULL-terminated
9519              * string.
9520              */
9521             if (!arg5) {
9522                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9523             } else {
9524                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9525             }
9526             ret = get_errno(ret);
9527 
9528             if (arg1) {
9529                 unlock_user(p, arg1, 0);
9530             }
9531             unlock_user(p2, arg2, 0);
9532             if (arg3) {
9533                 unlock_user(p3, arg3, 0);
9534             }
9535         }
9536         return ret;
9537 #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
9538 #if defined(TARGET_NR_umount)
9539     case TARGET_NR_umount:
9540 #endif
9541 #if defined(TARGET_NR_oldumount)
9542     case TARGET_NR_oldumount:
9543 #endif
9544         if (!(p = lock_user_string(arg1)))
9545             return -TARGET_EFAULT;
9546         ret = get_errno(umount(p));
9547         unlock_user(p, arg1, 0);
9548         return ret;
9549 #endif
9550 #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
9551     case TARGET_NR_move_mount:
9552         {
9553             void *p2, *p4;
9554 
9555             if (!arg2 || !arg4) {
9556                 return -TARGET_EFAULT;
9557             }
9558 
9559             p2 = lock_user_string(arg2);
9560             if (!p2) {
9561                 return -TARGET_EFAULT;
9562             }
9563 
9564             p4 = lock_user_string(arg4);
9565             if (!p4) {
9566                 unlock_user(p2, arg2, 0);
9567                 return -TARGET_EFAULT;
9568             }
9569             ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
9570 
9571             unlock_user(p2, arg2, 0);
9572             unlock_user(p4, arg4, 0);
9573 
9574             return ret;
9575         }
9576 #endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            /*
             * open_tree(2): only the O_CLOEXEC bit needs translating from
             * the target's flag value to the host's; the remaining
             * OPEN_TREE_* bits are passed through as-is.
             */
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            /*
             * stime(2): set the system time from a guest time_t.  Emulated
             * via clock_settime(CLOCK_REALTIME) with nanoseconds forced
             * to zero.
             */
            struct timespec ts;
            ts.tv_nsec = 0;
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm(2) returns the remaining seconds and cannot fail,
         * so no get_errno() wrapping is needed. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /* pause(2): suspend until a signal arrives, using the task's
         * tracked signal mask; always reports EINTR to the guest. */
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            /*
             * utime(2): convert the guest's target_utimbuf (if any) into a
             * host utimbuf; a NULL arg2 passes NULL through to utime().
             */
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            /* utimes(2): copy the two-element timeval array from the guest,
             * or pass NULL if arg2 is 0 (meaning "set to current time"). */
            struct timeval *tvp, tv[2];
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            /* futimesat(2): same as utimes but relative to dirfd arg1;
             * the pathname is remapped through path(). */
            struct timeval *tvp, tv[2];
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        /* access(2): pathname is remapped through path() for sysroot use. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        /* faccessat(2): the kernel syscall takes no flags argument,
         * so 0 is passed for the glibc wrapper's flags parameter. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        /* faccessat2(2): like faccessat but with a real flags argument. */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        /* nice(2): adjust process priority by arg1. */
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync(2) returns void; always report success. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* kill(2): translate the target signal number to the host's. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            /* rename(2): lock both pathnames; unlock_user is reached even
             * when one lock failed (assumed safe on a NULL pointer). */
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            /* renameat(2): dirfds arg1/arg3 pass through unchanged. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            /* renameat2(2): arg5 carries the RENAME_* flags. */
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        /* mkdir(2): arg2 is the mode. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        /* mkdirat(2): arg1 is the dirfd, arg3 the mode. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        /* dup(2): on success, propagate any fd translation state
         * (e.g. for special fds) from the old fd to the new one. */
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2(2): flags are translated via the fcntl flag table. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
    case TARGET_NR_times:
        {
            /*
             * times(2): fill the guest's target_tms (if arg1 non-NULL) and
             * return the clock-tick value converted to target clock_t.
             */
            struct target_tms *tmsp;
            struct tms tms;
            ret = get_errno(times(&tms));
            if (arg1) {
                tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
                if (!tmsp)
                    return -TARGET_EFAULT;
                /* NOTE(review): tmsp is never unlock_user()'d after these
                 * writes — confirm this is intentional. */
                tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
                tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
                tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
                tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
            }
            if (!is_error(ret))
                ret = host_to_target_clock_t(ret);
        }
        return ret;
    case TARGET_NR_acct:
        /* acct(2): a NULL pathname disables process accounting. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        /* umount2(2): arg2 carries the MNT_* flags, passed through. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        /* ioctl(2): argument marshalling handled by do_ioctl(). */
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        /* dup2(2): propagate fd translation state on success. */
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* Only O_CLOEXEC is a valid dup3 flag. */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            /* NOTE(review): returns host -EINVAL rather than
             * -TARGET_EINVAL — confirm the values coincide on all
             * supported targets. */
            return -EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
            /*
             * Old-style sigaction(2): translate between the target's old
             * sigaction layout and the internal target_sigaction, then call
             * do_sigaction().  MIPS has its own layout with a 4-word mask.
             */
#if defined(TARGET_MIPS)
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The guest must agree on the sigset size exactly. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* act was only read; unlock without copying back. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* sgetmask(2): return the current blocked-signal mask in
             * the target's old (single-word) sigset format. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* ssetmask(2): replace the blocked mask with arg1 and
             * return the previous mask in old sigset format. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
            /*
             * Old-style sigprocmask(2).  Alpha passes the mask by value in
             * arg2 and returns the old mask in the syscall result; other
             * targets pass pointers to old-format sigsets.
             */
#if defined(TARGET_ALPHA)
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* 'how' is only validated when a new set is supplied. */
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /* rt_sigprocmask(2): full-size sigsets; arg4 must match the
             * target sigset size exactly. */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* 'how' is only validated when a new set is supplied. */
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* sigpending(2): write the pending set in old sigset format. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            /*
             * sigsuspend(2): wait with a temporary signal mask.  Alpha
             * passes the mask by value in arg1; other targets pass a
             * pointer handled by process_sigsuspend_mask().
             */
            sigset_t *set;

#if defined(TARGET_ALPHA)
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            /* rt_sigsuspend(2): arg2 is the guest's sigsetsize, checked by
             * process_sigsuspend_mask(). */
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            /*
             * rt_sigtimedwait(2): wait for a signal in 'set' with optional
             * timeout; on success convert the siginfo and the signal
             * number back to target conventions.
             */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait but with a 64-bit time timespec. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* rt_sigqueueinfo(2): convert the guest siginfo and the
             * signal number before queueing to pid arg1. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* rt_tgsigqueueinfo(2): thread-directed variant; arg1/arg2 are
             * the tgid/tid, arg3 the signal, arg4 the siginfo pointer. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* sigreturn(2): signals must be blocked while the signal frame
         * is being restored; retry the syscall if blocking failed. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* getrlimit(2): convert host limits to target representation. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            /* getrusage(2): conversion to the target layout is done by
             * host_to_target_rusage(). */
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            /* gettimeofday(2): both tv (arg1) and tz (arg2) are optional. */
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            /* settimeofday(2): either pointer may be NULL in the guest,
             * in which case NULL is passed through to the host call. */
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }

            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }

            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* old_select: all five arguments packed in a struct at arg1 */
        ret = do_old_select(arg1);
#else
        /* modern calling convention: arguments passed in registers */
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* final flag selects 64-bit time_t handling of the timeout */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* Lock both path strings; fail with EFAULT if either guest
               address is bad.  unlock_user() is a no-op on NULL.  */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            /* arg2 is the dirfd; the two strings are arg1 and arg3 */
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            /* Lock the pathname string and the output buffer.  */
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                /*
                 * Bad guest address: fail before handing the pointers to
                 * do_guest_readlink(), matching the symlink/symlinkat and
                 * readlinkat cases above/below.
                 */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_guest_readlink(p, p2, arg3));
            }
            /* On success, ret is the byte count to copy back to arg2. */
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            /* arg1 = dirfd, arg2 = pathname, arg3 = buffer, arg4 = bufsiz */
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                /* Report the emulated binary's path, not QEMU's own. */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* Old-style mmap on these targets: arg1 points at a guest
               array of the six real arguments rather than passing them
               in registers.  */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 passes the file offset in units of 2^MMAP_SHIFT
           (normally 4 KiB pages) so 32-bit guests can map past 4 GiB.  */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* munmap pointers are always untagged */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Extend the region downward to cover the whole stack
                   and drop the flag, since target_mprotect() does not
                   implement PROT_GROWSDOWN itself.  */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* These operate directly on the host mapping of the guest
           address (g2h), hence the softmmu caveat above.  */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        /* MCL_* flag values differ between guest and host */
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        /* flags argument is hardcoded 0: the kernel syscall takes none */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Shared tail: fstatfs jumps here after filling stfs.  */
    convert_statfs:
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            /* host libc has no f_flags field; report none */
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
        /* Shared tail for the 64-bit variants; note the guest buffer
           is arg3 here (arg2 carries the structure size).  */
    convert_statfs64:
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    /* Socket syscalls: each delegates to a do_* helper that handles
       guest<->host sockaddr/msghdr conversion.  */
    case TARGET_NR_socketcall:
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept() is accept4() with no flags */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv() is recvfrom() with a NULL source address */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* last flag: 0 = receive, 1 = send */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* Fill arg2 bytes at guest address arg1; on success ret is the
           number of bytes to copy back out.  */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(2): arg1 = type, arg2 = guest buffer, arg3 = length.
             * Bug fix: 'len' previously mirrored arg2 (the buffer
             * *pointer*), so the sign/zero validation below tested the
             * wrong argument -- a valid high guest address truncated to a
             * negative int produced a spurious EINVAL.  Both lock_user()
             * and sys_syslog() consume arg3 as the length.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions take no buffer.  */
                return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        /* Nothing to read; matches kernel behaviour.  */
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
                    unlock_user(p, arg2, arg3);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* arg2 (new value) is optional; NULL queries-and-keeps.
               The target_timeval pair is laid out back to back, hence
               the "+ sizeof(struct target_timeval)" addressing.  */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Copy the previous timer value out if requested.  */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
/* Shared tail: stat/lstat jump here with st already filled in.  */
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                /* Zero first so padding/unused fields are deterministic. */
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps, when both host and guest have them */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-enter the dispatcher with shifted args.  */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only collect rusage if the guest supplied a buffer.  */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (WNOHANG, no child) means no status to report */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            /* Field-by-field copy handles layout and endianness
               differences between host and guest structs.  */
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    /* SysV IPC: either the multiplexed ipc() entry point or the
       individual syscalls, depending on the target ABI.  */
    case TARGET_NR_ipc:
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop() is semtimedop() without a timeout */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* final flag selects 64-bit time_t timeout conversion */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest cleanup (gdbstub etc.) before exiting */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* vm86 mode only exists for 32-bit x86 guests */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is read-modify-write: convert in, call,
               convert the updated struct back out.  */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;

            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* same as clock_adjtime but with 64-bit time_t conversion */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host lacks _llseek: emulate with a single 64-bit lseek,
             * combining the guest's high (arg2) and low (arg3) halves. */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* Write the 64-bit result offset to the guest pointer arg4. */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    /* poll/ppoll family share one helper; the two bools select
     * ppoll semantics and 64-bit time_t respectively. */
    case TARGET_NR_poll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* Lock the guest iovec array (arg3 entries) for writing. */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec reports failure via host errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                /* Split the guest's 64-bit offset per ABI conventions. */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                /* Copy the host cpu mask out to the guest buffer. */
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round up to whole host ulongs, as for sched_getaffinity. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Read and convert the guest cpu mask into host layout. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpu, node;
            /* Either output pointer may be NULL in the guest; only query
             * the values the guest actually asked for. */
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            /* A NULL param pointer is EINVAL, matching the kernel. */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                /* Copy the priority back out with byte-swapping. */
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Clamp the guest-supplied size to our host struct. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                /* Field-by-field byte-swapped copy to the guest struct. */
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* Util-clamp fields exist only in the V1 layout; the kernel
                 * sets scha.size to the size it actually filled in.
                 * NOTE(review): assumes arg3 also covers the util fields
                 * whenever scha.size does — verify against kernel behavior. */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* First word of the guest struct is its self-declared size. */
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            if (!size) {
                /* size == 0 means the original (V0) layout. */
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                /* Too small to be any known version: report our size and
                 * fail with E2BIG, as the kernel does. */
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* A larger-than-known struct is only acceptable if the extra
             * tail bytes are all zero (forward compatibility rule). */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            /* Byte-swapped field-by-field copy into the host struct. */
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Copy the RR quantum out in the guest's timespec layout. */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Same as above, but with a 64-bit time_t guest layout. */
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        {
            struct timespec req, rem;
            /*
             * Fail with EFAULT on an unreadable request struct, matching
             * the kernel and the other time syscalls in this file; the
             * original code ignored the conversion result.
             */
            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            /*
             * The remaining time is only written back when the sleep did
             * not complete (e.g. EINTR) and the guest supplied a buffer.
             */
            if (is_error(ret) && arg2 &&
                host_to_target_timespec(arg2, &rem)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
11582     case TARGET_NR_prctl:
11583         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11584         break;
11585 #ifdef TARGET_NR_arch_prctl
11586     case TARGET_NR_arch_prctl:
11587         return do_arch_prctl(cpu_env, arg1, arg2);
11588 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* Some ABIs pass 64-bit syscall args in aligned register pairs,
         * which shifts the offset halves up by one argument slot. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Flush back only the bytes actually read. */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        /* sys_getcwd1 writes the path into the locked guest buffer. */
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        /* Header is locked for write: the kernel updates .version for
         * both capget and capset, even on failure. */
        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        /* arg2 (data pointer) may legitimately be NULL, e.g. for a
         * version probe; then dataptr stays NULL. */
        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                /* NOTE(review): data[] is copied back even when capget
                 * failed, in which case it is uninitialized — confirm this
                 * matches intended behavior. */
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);

#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    {
        off_t *offp = NULL;
        off_t off;
        /* arg3 is an optional in/out offset pointer. */
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            /* Write the updated offset back; an EFAULT here replaces
             * the (successful) byte count as the result. */
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    {
        off_t *offp = NULL;
        off_t off;
        /* Same as sendfile, but the guest offset is always 64-bit. */
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via the common fork helper with vfork clone flags. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        /* path() applies the configured sysroot prefix if any. */
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                /* Prefer the host's native statx when available. */
                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only fall through to the fstatat emulation when the
                 * host kernel itself lacks statx (ENOSYS). */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: synthesize a statx result from fstatat(). */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: fields fstatat cannot provide stay 0. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* low2high*: expand legacy 16-bit ids to the host's full width. */
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 means "query the count": no buffer needed. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        /* legacy-id variant: widen target uid/gid before the host call */
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* lock the guest path string into host memory for the duration */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* widen each legacy uid (sys_setresuid is the raw syscall wrapper) */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* narrow each id back to the target's legacy width on copy-out */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * Legacy-id setresgid(2): widen each target gid before the host
         * call.  The guard previously tested TARGET_NR_getresgid; it must
         * match the case label actually compiled here (benign today only
         * because every target defines both numbers together).
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* narrow each gid to the target's legacy width on copy-out */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /*
         * NOTE(review): unlike setuid/setgid above, no low2highuid()
         * widening is applied here -- confirm whether 16-bit-id targets
         * need it for setfsuid/setfsgid.
         */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
12045 
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit-id variant: ids pass through without narrowing/widening */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
   /* Alpha specific */
    case TARGET_NR_getxuid:
        /* Alpha getxuid returns the real uid in v0 and the effective
           uid in the a4 register. */
         {
            uid_t euid;
            euid=geteuid();
            cpu_env->ir[IR_A4]=euid;
         }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxgid:
        /*
         * Alpha getxgid returns the real gid in v0 and the effective gid
         * in the a4 register.  (The local was previously mistyped as
         * uid_t; group ids use gid_t.)
         */
        {
            gid_t egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        /* Unhandled selectors fall through with EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /* Refresh the sticky status bits in swcr from the fpcr,
                   which is where they are actually maintained (see the
                   matching comment in osf_setsysinfo below). */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        /* Unhandled selectors fall through with EOPNOTSUPP. */
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Keep only the dynamic rounding mode from the old fpcr,
                   then fold in the control bits requested via swcr. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                /* fex = newly-raised exceptions that are both enabled in
                   swcr and not already pending -- these get a SIGFPE. */
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Later checks win, so si_code reflects the
                       highest-priority enabled exception bit. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Map the target's "how" constant onto the host's. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* On success the previous mask is the return value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif
12233 
/* 32-bit-id syscall variants: ids are full-width, so no low2high/high2low
   narrowing is applied anywhere below. */
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            /* with gidsetsize == 0 the syscall only reports the count */
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* only 'ret' entries were actually filled in by the host */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                /* read-only lock: nothing to copy back, hence length 0 */
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        /* 32-bit-id variants continue: ids pass straight through */
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* copy all three ids out as 32-bit values */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                /* kernel mincore reports ENOMEM for an unmapped range */
                return -TARGET_ENOMEM;
            }
            /*
             * mincore(2) fills one status byte per page of the queried
             * range into the vector at arg3.  It is an *output* buffer,
             * not a NUL-terminated string, so it must be locked for
             * writing with the proper length (one byte per target page,
             * rounded up) rather than with lock_user_string().
             */
            abi_ulong vec_len = DIV_ROUND_UP(arg2, TARGET_PAGE_SIZE);
            p = lock_user(VERIFY_WRITE, arg3, vec_len, 0);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                /* copy the vector back only on success */
                unlock_user(p, arg3, is_error(ret) ? 0 : vec_len);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error number directly, not via errno */
        return -host_to_target_errno(ret);
#endif

#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* rotate advice from arg2 to arg6 so the common call below works */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different numeric values for the advice constants */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */
12472 
#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* all advice translation and range validation lives in target_madvise() */
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * fcntl64: only the 64-bit file-lock commands need the flock64
         * marshalling here; everything else is delegated to do_fcntl().
         * (Re-indented: two break statements used raw TABs, against the
         * file's 4-space style.)
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out struct flock64 differently from EABI. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch (arg2) {
        case TARGET_F_GETLK64:
            /* copy the lock description in, query it, copy the result out */
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* report the emulated target's page size, not the host's */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* 64-bit offset arrives as a 32-bit register pair, possibly
           shifted up one slot when the ABI aligns register pairs */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    /*
     * Extended-attribute family.  Common shape: attribute names and
     * paths are locked as NUL-terminated strings, values as raw byte
     * buffers.  A zero buffer pointer is a valid "report the required
     * size" query, hence the arg2/arg3 guards before locking.
     */
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;   /* note: this 'p' shadows the function-scope one */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            /* p = path, n = attribute name */
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user tolerates NULL, so no need to test p/n/v here */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* copy the (up to arg4 bytes of) value back to the guest */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the CP0 UserLocal register */
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* low byte of PR_PID is the pid; the TLS value must not touch it */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          cpu_env->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k keeps the thread pointer in per-task emulator state */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
12753 
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* convert the guest timespec first; EFAULT takes priority */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        /* time64 variant: 64-bit time_t layout even on 32-bit targets */
        struct timespec ts;

        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* a failed copy-out turns the result into EFAULT */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /*
             * clock_getres(2) permits a NULL res pointer, so only copy
             * out when the guest supplied one -- but then a failing
             * copy must be reported as EFAULT instead of being silently
             * ignored (as it previously was).
             */
            if (arg2 && host_to_target_timespec(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* time64 variant: same NULL-pointer and EFAULT handling */
            if (arg2 && host_to_target_timespec64(arg2, &ts)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* &ts doubles as the output buffer for the remaining time */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* time64 variant: identical logic with 64-bit time_t marshalling */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif
12863 
#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        /* Record the clear_child_tid pointer in the per-thread TaskState;
         * presumably consumed by QEMU's thread-exit handling elsewhere in
         * this file (not visible here). */
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif
12873 
    case TARGET_NR_tkill:
        /* Translate the target signal number before forwarding. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
12898 
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* args: dirfd, pathname, const struct timespec times[2], flags.
             * tsp == NULL (arg3 == 0) is passed straight to the host. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* NULL pathname: operate on the fd itself (futimens-style). */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* Same as utimensat but times use the 64-bit timespec layout. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* Second argument selects 32- vs 64-bit timeout handling. */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            /* Register a translator for data read from this fd. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* Flags are translated through the fcntl flag table. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* args: fd, pathname, event mask.  Check the string lock result:
         * every other string argument in this file does, and passing a
         * NULL pathname into path() would otherwise dereference NULL. */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* Both arguments are plain integers; pass straight through. */
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif
12994 
#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            /* args: name, oflag, mode, struct mq_attr *attr (optional) */
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the "arg1 - 1" offset looks suspicious; every
             * other string argument in this file is locked at its exact
             * guest address.  Confirm against lock_user_string() semantics
             * before changing. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;
    case TARGET_NR_mq_unlink:
        /* Remove a named queue; only the name string crosses the ABI.
         * NOTE(review): the "arg1 - 1" offset mirrors mq_open above —
         * confirm against lock_user_string() before changing. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;
13027 
#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            /* args: mqd, msg_ptr, msg_len, msg_prio, abs_timeout (optional,
             * NULL on the host when arg5 == 0 -> block indefinitely). */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                /* Previously unchecked; a NULL here was handed to the host
                 * syscall.  Fail with EFAULT like the kernel would. */
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    /* Don't leak the user lock on the fault path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* Same as mq_timedsend but with the 64-bit timespec layout. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                /* Previously unchecked; fail with EFAULT on a bad buffer. */
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    /* Don't leak the user lock on the fault path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif
13070 
#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            /* args: mqd, msg_ptr, msg_len, msg_prio* (optional),
             * abs_timeout (optional). */
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): the receive buffer is locked VERIFY_READ even
             * though the host writes into it; the copy-back length arg3 in
             * unlock_user() below is relied on — confirm for DEBUG_REMAP. */
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                /* Previously unchecked; fail with EFAULT on a bad buffer. */
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    /* Don't leak the user lock on the fault path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            /* Only store the priority on success: the kernel does not
             * write *msg_prio on error, and prio is uninitialized then. */
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* Same as mq_timedreceive but with the 64-bit timespec layout. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                /* Previously unchecked; fail with EFAULT on a bad buffer. */
                return -TARGET_EFAULT;
            }
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    /* Don't leak the user lock on the fault path. */
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    unlock_user(p, arg2, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            /* Only store the priority on success (prio is uninitialized
             * on error), and report EFAULT if the store fails. */
            if (!is_error(ret) && arg4 != 0 && put_user_u32(prio, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13124 
13125     /* Not implemented for now... */
13126 /*     case TARGET_NR_mq_notify: */
13127 /*         break; */
13128 
13129     case TARGET_NR_mq_getsetattr:
13130         {
13131             struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
13132             ret = 0;
13133             if (arg2 != 0) {
13134                 copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
13135                 ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
13136                                            &posix_mq_attr_out));
13137             } else if (arg3 != 0) {
13138                 ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
13139             }
13140             if (ret == 0 && arg3 != 0) {
13141                 copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
13142             }
13143         }
13144         return ret;
13145 #endif
13146 
13147 #ifdef CONFIG_SPLICE
13148 #ifdef TARGET_NR_tee
13149     case TARGET_NR_tee:
13150         {
13151             ret = get_errno(tee(arg1,arg2,arg3,arg4));
13152         }
13153         return ret;
13154 #endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            /* Copy in the optional loff_t offsets, call splice(), then
             * copy the (possibly advanced) offsets back to the guest. */
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
13186 #ifdef TARGET_NR_vmsplice
13187 	case TARGET_NR_vmsplice:
13188         {
13189             struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
13190             if (vec != NULL) {
13191                 ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
13192                 unlock_iovec(vec, arg2, arg3, 0);
13193             } else {
13194                 ret = -host_to_target_errno(errno);
13195             }
13196         }
13197         return ret;
13198 #endif
13199 #endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        /* Old-style eventfd has no flags argument. */
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register a translator for data read/written on this fd. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Only EFD_NONBLOCK/EFD_CLOEXEC need translation; their target
         * encodings are the O_NONBLOCK/O_CLOEXEC bits. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* 32-bit ABIs pass the 64-bit offset and length as register
         * pairs that must be reassembled. */
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* On MIPS o32 the 64-bit offset pair starts at arg3 (arg2 is
         * alignment padding), pushing the flags out to arg7 —
         * NOTE(review): confirm against the MIPS syscall ABI. */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Old signalfd is signalfd4 with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is only a hint; pass through unchanged. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        /* args: epfd, op, fd, struct epoll_event *event.  The event struct
         * is only read for ops other than EPOLL_CTL_DEL. */
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
13318 
#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: collect events into a host-side array,
         * then convert each one back into the guest's event buffer. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        /* Dispatch on the original syscall number to pick the sigmask
         * handling; both variants funnel into safe_epoll_pwait(). */
        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; copy back only those. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* New limits for AS/DATA/STACK are deliberately not forwarded
         * (rnewp stays NULL so only the old values are queried).
         * NOTE(review): presumably because those limits would constrain
         * QEMU itself rather than the guest — confirm. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Write the host name directly into the guest buffer at arg1. */
        char *buf = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!buf) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(gethostname(buf, arg2));
        unlock_user(buf, arg1, arg2);
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Compare-and-exchange the 32-bit word at arg6: if it equals arg2,
         * store arg1; the old value is always returned to the guest. */
        /* should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Unmapped address: raise SIGSEGV and bail out.  Previously
             * execution fell through and compared/returned the
             * uninitialized mem_value (undefined behavior); the 0xdeadbeef
             * sentinel assigned here was discarded. */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif
13467 
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            /* No free slot in the host timer table. */
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* Release the slot reserved by next_free_host_timer(). */
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                /* The guest-visible timer id encodes the magic plus the
                 * slot index; undo everything if it cannot be stored. */
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif
13506 
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() returns a negative errno for a bad id. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as timer_settime but with the 64-bit itimerspec layout. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13560 
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* NOTE(review): the conversion runs even when timer_gettime()
             * failed, writing an uninitialized hspec to the guest —
             * confirm whether gating on !is_error(ret) is wanted. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif
13606 
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the slot regardless of the host result. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif
13639 
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        /* Flags are translated through the fcntl flag table. */
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            /* Register a translator for data read from this fd. */
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* Same as timerfd_gettime but with the 64-bit layout. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13677 
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            /* args: fd, flags, new_value, old_value (optional).
             * A NULL new_value is passed through to the host. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* Same as timerfd_settime but with the 64-bit layout. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13723 
13724 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13725     case TARGET_NR_ioprio_get:
13726         return get_errno(ioprio_get(arg1, arg2));
13727 #endif
13728 
13729 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13730     case TARGET_NR_ioprio_set:
13731         return get_errno(ioprio_set(arg1, arg2, arg3));
13732 #endif
13733 
13734 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13735     case TARGET_NR_setns:
13736         return get_errno(setns(arg1, arg2));
13737 #endif
13738 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13739     case TARGET_NR_unshare:
13740         return get_errno(unshare(arg1));
13741 #endif
13742 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13743     case TARGET_NR_kcmp:
13744         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13745 #endif
13746 #ifdef TARGET_NR_swapcontext
13747     case TARGET_NR_swapcontext:
13748         /* PowerPC specific.  */
13749         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13750 #endif
13751 #ifdef TARGET_NR_memfd_create
13752     case TARGET_NR_memfd_create:
13753         p = lock_user_string(arg1);
13754         if (!p) {
13755             return -TARGET_EFAULT;
13756         }
13757         ret = get_errno(memfd_create(p, arg2));
13758         fd_trans_unregister(ret);
13759         unlock_user(p, arg1, 0);
13760         return ret;
13761 #endif
13762 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13763     case TARGET_NR_membarrier:
13764         return get_errno(membarrier(arg1, arg2));
13765 #endif
13766 
13767 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13768     case TARGET_NR_copy_file_range:
13769         {
13770             loff_t inoff, outoff;
13771             loff_t *pinoff = NULL, *poutoff = NULL;
13772 
13773             if (arg2) {
13774                 if (get_user_u64(inoff, arg2)) {
13775                     return -TARGET_EFAULT;
13776                 }
13777                 pinoff = &inoff;
13778             }
13779             if (arg4) {
13780                 if (get_user_u64(outoff, arg4)) {
13781                     return -TARGET_EFAULT;
13782                 }
13783                 poutoff = &outoff;
13784             }
13785             /* Do not sign-extend the count parameter. */
13786             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13787                                                  (abi_ulong)arg5, arg6));
13788             if (!is_error(ret) && ret > 0) {
13789                 if (arg2) {
13790                     if (put_user_u64(inoff, arg2)) {
13791                         return -TARGET_EFAULT;
13792                     }
13793                 }
13794                 if (arg4) {
13795                     if (put_user_u64(outoff, arg4)) {
13796                         return -TARGET_EFAULT;
13797                     }
13798                 }
13799             }
13800         }
13801         return ret;
13802 #endif
13803 
13804 #if defined(TARGET_NR_pivot_root)
13805     case TARGET_NR_pivot_root:
13806         {
13807             void *p2;
13808             p = lock_user_string(arg1); /* new_root */
13809             p2 = lock_user_string(arg2); /* put_old */
13810             if (!p || !p2) {
13811                 ret = -TARGET_EFAULT;
13812             } else {
13813                 ret = get_errno(pivot_root(p, p2));
13814             }
13815             unlock_user(p2, arg2, 0);
13816             unlock_user(p, arg1, 0);
13817         }
13818         return ret;
13819 #endif
13820 
13821 #if defined(TARGET_NR_riscv_hwprobe)
13822     case TARGET_NR_riscv_hwprobe:
13823         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13824 #endif
13825 
13826     default:
13827         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13828         return -TARGET_ENOSYS;
13829     }
13830     return ret;
13831 }
13832 
13833 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13834                     abi_long arg2, abi_long arg3, abi_long arg4,
13835                     abi_long arg5, abi_long arg6, abi_long arg7,
13836                     abi_long arg8)
13837 {
13838     CPUState *cpu = env_cpu(cpu_env);
13839     abi_long ret;
13840 
13841 #ifdef DEBUG_ERESTARTSYS
13842     /* Debug-only code for exercising the syscall-restart code paths
13843      * in the per-architecture cpu main loops: restart every syscall
13844      * the guest makes once before letting it through.
13845      */
13846     {
13847         static bool flag;
13848         flag = !flag;
13849         if (flag) {
13850             return -QEMU_ERESTARTSYS;
13851         }
13852     }
13853 #endif
13854 
13855     record_syscall_start(cpu, num, arg1,
13856                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13857 
13858     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13859         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13860     }
13861 
13862     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13863                       arg5, arg6, arg7, arg8);
13864 
13865     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13866         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13867                           arg3, arg4, arg5, arg6);
13868     }
13869 
13870     record_syscall_return(cpu, num, ret);
13871     return ret;
13872 }
13873