xref: /openbmc/qemu/linux-user/syscall.c (revision 7a8d9f3a)
1 /*
2  *  Linux syscalls
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #define _ATFILE_SOURCE
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22 #include "qemu/path.h"
23 #include "qemu/memfd.h"
24 #include "qemu/queue.h"
25 #include "qemu/plugin.h"
26 #include "target_mman.h"
27 #include <elf.h>
28 #include <endian.h>
29 #include <grp.h>
30 #include <sys/ipc.h>
31 #include <sys/msg.h>
32 #include <sys/wait.h>
33 #include <sys/mount.h>
34 #include <sys/file.h>
35 #include <sys/fsuid.h>
36 #include <sys/personality.h>
37 #include <sys/prctl.h>
38 #include <sys/resource.h>
39 #include <sys/swap.h>
40 #include <linux/capability.h>
41 #include <sched.h>
42 #include <sys/timex.h>
43 #include <sys/socket.h>
44 #include <linux/sockios.h>
45 #include <sys/un.h>
46 #include <sys/uio.h>
47 #include <poll.h>
48 #include <sys/times.h>
49 #include <sys/shm.h>
50 #include <sys/sem.h>
51 #include <sys/statfs.h>
52 #include <utime.h>
53 #include <sys/sysinfo.h>
54 #include <sys/signalfd.h>
55 //#include <sys/user.h>
56 #include <netinet/in.h>
57 #include <netinet/ip.h>
58 #include <netinet/tcp.h>
59 #include <netinet/udp.h>
60 #include <linux/wireless.h>
61 #include <linux/icmp.h>
62 #include <linux/icmpv6.h>
63 #include <linux/if_tun.h>
64 #include <linux/in6.h>
65 #include <linux/errqueue.h>
66 #include <linux/random.h>
67 #ifdef CONFIG_TIMERFD
68 #include <sys/timerfd.h>
69 #endif
70 #ifdef CONFIG_EVENTFD
71 #include <sys/eventfd.h>
72 #endif
73 #ifdef CONFIG_EPOLL
74 #include <sys/epoll.h>
75 #endif
76 #ifdef CONFIG_ATTR
77 #include "qemu/xattr.h"
78 #endif
79 #ifdef CONFIG_SENDFILE
80 #include <sys/sendfile.h>
81 #endif
82 #ifdef HAVE_SYS_KCOV_H
83 #include <sys/kcov.h>
84 #endif
85 
86 #define termios host_termios
87 #define winsize host_winsize
88 #define termio host_termio
89 #define sgttyb host_sgttyb /* same as target */
90 #define tchars host_tchars /* same as target */
91 #define ltchars host_ltchars /* same as target */
92 
93 #include <linux/termios.h>
94 #include <linux/unistd.h>
95 #include <linux/cdrom.h>
96 #include <linux/hdreg.h>
97 #include <linux/soundcard.h>
98 #include <linux/kd.h>
99 #include <linux/mtio.h>
100 #include <linux/fs.h>
101 #include <linux/fd.h>
102 #if defined(CONFIG_FIEMAP)
103 #include <linux/fiemap.h>
104 #endif
105 #include <linux/fb.h>
106 #if defined(CONFIG_USBFS)
107 #include <linux/usbdevice_fs.h>
108 #include <linux/usb/ch9.h>
109 #endif
110 #include <linux/vt.h>
111 #include <linux/dm-ioctl.h>
112 #include <linux/reboot.h>
113 #include <linux/route.h>
114 #include <linux/filter.h>
115 #include <linux/blkpg.h>
116 #include <netpacket/packet.h>
117 #include <linux/netlink.h>
118 #include <linux/if_alg.h>
119 #include <linux/rtc.h>
120 #include <sound/asound.h>
121 #ifdef HAVE_BTRFS_H
122 #include <linux/btrfs.h>
123 #endif
124 #ifdef HAVE_DRM_H
125 #include <libdrm/drm.h>
126 #include <libdrm/i915_drm.h>
127 #endif
128 #include "linux_loop.h"
129 #include "uname.h"
130 
131 #include "qemu.h"
132 #include "user-internals.h"
133 #include "strace.h"
134 #include "signal-common.h"
135 #include "loader.h"
136 #include "user-mmap.h"
137 #include "user/safe-syscall.h"
138 #include "qemu/guest-random.h"
139 #include "qemu/selfmap.h"
140 #include "user/syscall-trace.h"
141 #include "special-errno.h"
142 #include "qapi/error.h"
143 #include "fd-trans.h"
144 #include "tcg/tcg.h"
145 #include "cpu_loop-common.h"
146 
147 #ifndef CLONE_IO
148 #define CLONE_IO                0x80000000      /* Clone io context */
149 #endif
150 
151 /* We can't directly call the host clone syscall, because this will
152  * badly confuse libc (breaking mutexes, for example). So we must
153  * divide clone flags into:
154  *  * flag combinations that look like pthread_create()
155  *  * flag combinations that look like fork()
156  *  * flags we can implement within QEMU itself
157  *  * flags we can't support and will return an error for
158  */
159 /* For thread creation, all these flags must be present; for
160  * fork, none must be present.
161  */
162 #define CLONE_THREAD_FLAGS                              \
163     (CLONE_VM | CLONE_FS | CLONE_FILES |                \
164      CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
165 
166 /* These flags are ignored:
167  * CLONE_DETACHED is now ignored by the kernel;
168  * CLONE_IO is just an optimisation hint to the I/O scheduler
169  */
170 #define CLONE_IGNORED_FLAGS                     \
171     (CLONE_DETACHED | CLONE_IO)
172 
173 #ifndef CLONE_PIDFD
174 # define CLONE_PIDFD 0x00001000
175 #endif
176 
177 /* Flags for fork which we can implement within QEMU itself */
178 #define CLONE_OPTIONAL_FORK_FLAGS               \
179     (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
180      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
181 
182 /* Flags for thread creation which we can implement within QEMU itself */
183 #define CLONE_OPTIONAL_THREAD_FLAGS                             \
184     (CLONE_SETTLS | CLONE_PARENT_SETTID |                       \
185      CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
186 
187 #define CLONE_INVALID_FORK_FLAGS                                        \
188     (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
189 
190 #define CLONE_INVALID_THREAD_FLAGS                                      \
191     (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS |     \
192        CLONE_IGNORED_FLAGS))
193 
194 /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
195  * have almost all been allocated. We cannot support any of
196  * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
197  * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
198  * The checks against the invalid thread masks above will catch these.
199  * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
200  */
201 
202 /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
203  * once. This exercises the codepaths for restart.
204  */
205 //#define DEBUG_ERESTARTSYS
206 
207 //#include <linux/msdos_fs.h>
208 #define VFAT_IOCTL_READDIR_BOTH \
209     _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
210 #define VFAT_IOCTL_READDIR_SHORT \
211     _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
212 
213 #undef _syscall0
214 #undef _syscall1
215 #undef _syscall2
216 #undef _syscall3
217 #undef _syscall4
218 #undef _syscall5
219 #undef _syscall6
220 
221 #define _syscall0(type,name)		\
222 static type name (void)			\
223 {					\
224 	return syscall(__NR_##name);	\
225 }
226 
227 #define _syscall1(type,name,type1,arg1)		\
228 static type name (type1 arg1)			\
229 {						\
230 	return syscall(__NR_##name, arg1);	\
231 }
232 
233 #define _syscall2(type,name,type1,arg1,type2,arg2)	\
234 static type name (type1 arg1,type2 arg2)		\
235 {							\
236 	return syscall(__NR_##name, arg1, arg2);	\
237 }
238 
239 #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3)	\
240 static type name (type1 arg1,type2 arg2,type3 arg3)		\
241 {								\
242 	return syscall(__NR_##name, arg1, arg2, arg3);		\
243 }
244 
245 #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4)	\
246 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4)			\
247 {										\
248 	return syscall(__NR_##name, arg1, arg2, arg3, arg4);			\
249 }
250 
251 #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
252 		  type5,arg5)							\
253 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5)	\
254 {										\
255 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5);		\
256 }
257 
258 
259 #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,	\
260 		  type5,arg5,type6,arg6)					\
261 static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5,	\
262                   type6 arg6)							\
263 {										\
264 	return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6);	\
265 }
266 
267 
268 #define __NR_sys_uname __NR_uname
269 #define __NR_sys_getcwd1 __NR_getcwd
270 #define __NR_sys_getdents __NR_getdents
271 #define __NR_sys_getdents64 __NR_getdents64
272 #define __NR_sys_getpriority __NR_getpriority
273 #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
274 #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
275 #define __NR_sys_syslog __NR_syslog
276 #if defined(__NR_futex)
277 # define __NR_sys_futex __NR_futex
278 #endif
279 #if defined(__NR_futex_time64)
280 # define __NR_sys_futex_time64 __NR_futex_time64
281 #endif
282 #define __NR_sys_statx __NR_statx
283 
284 #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
285 #define __NR__llseek __NR_lseek
286 #endif
287 
288 /* Newer kernel ports have llseek() instead of _llseek() */
289 #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
290 #define TARGET_NR__llseek TARGET_NR_llseek
291 #endif
292 
293 /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
294 #ifndef TARGET_O_NONBLOCK_MASK
295 #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
296 #endif
297 
298 #define __NR_sys_gettid __NR_gettid
299 _syscall0(int, sys_gettid)
300 
301 /* For the 64-bit guest on 32-bit host case we must emulate
302  * getdents using getdents64, because otherwise the host
303  * might hand us back more dirent records than we can fit
304  * into the guest buffer after structure format conversion.
305  * Otherwise we emulate getdents with getdents if the host has it.
306  */
307 #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
308 #define EMULATE_GETDENTS_WITH_GETDENTS
309 #endif
310 
311 #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
312 _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
313 #endif
314 #if (defined(TARGET_NR_getdents) && \
315       !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
316     (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
317 _syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
318 #endif
319 #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
320 _syscall5(int, _llseek,  uint,  fd, ulong, hi, ulong, lo,
321           loff_t *, res, uint, wh);
322 #endif
323 _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
324 _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
325           siginfo_t *, uinfo)
326 _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
327 #ifdef __NR_exit_group
328 _syscall1(int,exit_group,int,error_code)
329 #endif
330 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
331 #define __NR_sys_close_range __NR_close_range
332 _syscall3(int,sys_close_range,int,first,int,last,int,flags)
333 #ifndef CLOSE_RANGE_CLOEXEC
334 #define CLOSE_RANGE_CLOEXEC     (1U << 2)
335 #endif
336 #endif
337 #if defined(__NR_futex)
338 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
339           const struct timespec *,timeout,int *,uaddr2,int,val3)
340 #endif
341 #if defined(__NR_futex_time64)
342 _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
343           const struct timespec *,timeout,int *,uaddr2,int,val3)
344 #endif
345 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
346 _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
347 #endif
348 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
349 _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
350                              unsigned int, flags);
351 #endif
352 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
353 _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
354 #endif
355 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
356 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
357           unsigned long *, user_mask_ptr);
358 #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
359 _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
360           unsigned long *, user_mask_ptr);
/*
 * sched_attr is not defined in glibc, so mirror the kernel's struct
 * layout here for the sys_sched_getattr/sys_sched_setattr wrappers
 * below, which pass it straight to the raw syscalls.
 */
struct sched_attr {
    uint32_t size;           /* size of this struct, for ABI extension */
    uint32_t sched_policy;
    uint64_t sched_flags;
    int32_t sched_nice;
    uint32_t sched_priority;
    uint64_t sched_runtime;
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min;
    uint32_t sched_util_max;
};
374 #define __NR_sys_sched_getattr __NR_sched_getattr
375 _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
376           unsigned int, size, unsigned int, flags);
377 #define __NR_sys_sched_setattr __NR_sched_setattr
378 _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
379           unsigned int, flags);
380 #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
381 _syscall1(int, sys_sched_getscheduler, pid_t, pid);
382 #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
383 _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
384           const struct sched_param *, param);
385 #define __NR_sys_sched_getparam __NR_sched_getparam
386 _syscall2(int, sys_sched_getparam, pid_t, pid,
387           struct sched_param *, param);
388 #define __NR_sys_sched_setparam __NR_sched_setparam
389 _syscall2(int, sys_sched_setparam, pid_t, pid,
390           const struct sched_param *, param);
391 #define __NR_sys_getcpu __NR_getcpu
392 _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
393 _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
394           void *, arg);
395 _syscall2(int, capget, struct __user_cap_header_struct *, header,
396           struct __user_cap_data_struct *, data);
397 _syscall2(int, capset, struct __user_cap_header_struct *, header,
398           struct __user_cap_data_struct *, data);
399 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
400 _syscall2(int, ioprio_get, int, which, int, who)
401 #endif
402 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
403 _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
404 #endif
405 #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
406 _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
407 #endif
408 
409 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
410 _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
411           unsigned long, idx1, unsigned long, idx2)
412 #endif
413 
414 /*
415  * It is assumed that struct statx is architecture independent.
416  */
417 #if defined(TARGET_NR_statx) && defined(__NR_statx)
418 _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
419           unsigned int, mask, struct target_statx *, statxbuf)
420 #endif
421 #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
422 _syscall2(int, membarrier, int, cmd, int, flags)
423 #endif
424 
/*
 * Translation table between guest (TARGET_O_*) and host (O_*) open(2)
 * flag encodings, used by the open/fcntl emulation paths.  Each row
 * appears to pair a target mask/value with the corresponding host
 * mask/value (see bitmask_transtbl for the exact field semantics).
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
  { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
  { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
  { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
  { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
  { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
  { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
  { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
  { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
  { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
  { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
  { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
  { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
  { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
  { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
  { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
  { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
  { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
  { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
  /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
  { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
  { 0, 0, 0, 0 }
};
460 
461 _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
462 
463 #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
464 #if defined(__NR_utimensat)
465 #define __NR_sys_utimensat __NR_utimensat
466 _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
467           const struct timespec *,tsp,int,flags)
468 #else
/* Fallback when the host provides no utimensat syscall: always fail. */
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
475 #endif
476 #endif /* TARGET_NR_utimensat */
477 
478 #ifdef TARGET_NR_renameat2
479 #if defined(__NR_renameat2)
480 #define __NR_sys_renameat2 __NR_renameat2
481 _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
482           const char *, new, unsigned int, flags)
483 #else
/*
 * Fallback when the host lacks renameat2: without the real syscall
 * only a flag-less rename can be performed, so reject any flags
 * with ENOSYS and otherwise delegate to plain renameat().
 */
static int sys_renameat2(int oldfd, const char *old,
                         int newfd, const char *new, int flags)
{
    if (flags != 0) {
        errno = ENOSYS;
        return -1;
    }
    return renameat(oldfd, old, newfd, new);
}
493 #endif
494 #endif /* TARGET_NR_renameat2 */
495 
496 #ifdef CONFIG_INOTIFY
497 #include <sys/inotify.h>
498 #else
499 /* Userspace can usually survive runtime without inotify */
500 #undef TARGET_NR_inotify_init
501 #undef TARGET_NR_inotify_init1
502 #undef TARGET_NR_inotify_add_watch
503 #undef TARGET_NR_inotify_rm_watch
504 #endif /* CONFIG_INOTIFY  */
505 
506 #if defined(TARGET_NR_prlimit64)
507 #ifndef __NR_prlimit64
508 # define __NR_prlimit64 -1
509 #endif
510 #define __NR_sys_prlimit64 __NR_prlimit64
511 /* The glibc rlimit structure may not be that used by the underlying syscall */
512 struct host_rlimit64 {
513     uint64_t rlim_cur;
514     uint64_t rlim_max;
515 };
516 _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
517           const struct host_rlimit64 *, new_limit,
518           struct host_rlimit64 *, old_limit)
519 #endif
520 
521 
522 #if defined(TARGET_NR_timer_create)
523 /* Maximum of 32 active POSIX timers allowed at any one time. */
524 #define GUEST_TIMER_MAX 32
525 static timer_t g_posix_timers[GUEST_TIMER_MAX];
526 static int g_posix_timer_allocated[GUEST_TIMER_MAX];
527 
528 static inline int next_free_host_timer(void)
529 {
530     int k;
531     for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
532         if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
533             return k;
534         }
535     }
536     return -1;
537 }
538 
539 static inline void free_host_timer_slot(int id)
540 {
541     qatomic_store_release(g_posix_timer_allocated + id, 0);
542 }
543 #endif
544 
/*
 * Translate a host errno value into the guest's errno numbering.
 * The case list is generated by expanding the E() entries in
 * errnos.c.inc; any errno not listed there passes through unchanged.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
555 
/*
 * Translate a guest errno value into the host's errno numbering;
 * inverse of host_to_target_errno(), built from the same E() table
 * in errnos.c.inc.  Unlisted values pass through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
566 
567 abi_long get_errno(abi_long ret)
568 {
569     if (ret == -1)
570         return -host_to_target_errno(errno);
571     else
572         return ret;
573 }
574 
575 const char *target_strerror(int err)
576 {
577     if (err == QEMU_ERESTARTSYS) {
578         return "To be restarted";
579     }
580     if (err == QEMU_ESIGRETURN) {
581         return "Successful exit from sigreturn";
582     }
583 
584     return strerror(target_to_host_errno(err));
585 }
586 
587 static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
588 {
589     int i;
590     uint8_t b;
591     if (usize <= ksize) {
592         return 1;
593     }
594     for (i = ksize; i < usize; i++) {
595         if (get_user_u8(b, addr + i)) {
596             return -TARGET_EFAULT;
597         }
598         if (b != 0) {
599             return 0;
600         }
601     }
602     return 1;
603 }
604 
605 #define safe_syscall0(type, name) \
606 static type safe_##name(void) \
607 { \
608     return safe_syscall(__NR_##name); \
609 }
610 
611 #define safe_syscall1(type, name, type1, arg1) \
612 static type safe_##name(type1 arg1) \
613 { \
614     return safe_syscall(__NR_##name, arg1); \
615 }
616 
617 #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
618 static type safe_##name(type1 arg1, type2 arg2) \
619 { \
620     return safe_syscall(__NR_##name, arg1, arg2); \
621 }
622 
623 #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
624 static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
625 { \
626     return safe_syscall(__NR_##name, arg1, arg2, arg3); \
627 }
628 
629 #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
630     type4, arg4) \
631 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
632 { \
633     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
634 }
635 
636 #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
637     type4, arg4, type5, arg5) \
638 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
639     type5 arg5) \
640 { \
641     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
642 }
643 
644 #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
645     type4, arg4, type5, arg5, type6, arg6) \
646 static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
647     type5 arg5, type6 arg6) \
648 { \
649     return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
650 }
651 
652 safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
653 safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
654 safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
655               int, flags, mode_t, mode)
656 #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
657 safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
658               struct rusage *, rusage)
659 #endif
660 safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
661               int, options, struct rusage *, rusage)
662 safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
663 safe_syscall5(int, execveat, int, dirfd, const char *, filename,
664               char **, argv, char **, envp, int, flags)
665 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
666     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
667 safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
668               fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
669 #endif
670 #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
671 safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
672               struct timespec *, tsp, const sigset_t *, sigmask,
673               size_t, sigsetsize)
674 #endif
675 safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
676               int, maxevents, int, timeout, const sigset_t *, sigmask,
677               size_t, sigsetsize)
678 #if defined(__NR_futex)
679 safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
680               const struct timespec *,timeout,int *,uaddr2,int,val3)
681 #endif
682 #if defined(__NR_futex_time64)
683 safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
684               const struct timespec *,timeout,int *,uaddr2,int,val3)
685 #endif
686 safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
687 safe_syscall2(int, kill, pid_t, pid, int, sig)
688 safe_syscall2(int, tkill, int, tid, int, sig)
689 safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
690 safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
691 safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
692 safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
693               unsigned long, pos_l, unsigned long, pos_h)
694 safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
695               unsigned long, pos_l, unsigned long, pos_h)
696 safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
697               socklen_t, addrlen)
698 safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
699               int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
700 safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
701               int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
702 safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
703 safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
704 safe_syscall2(int, flock, int, fd, int, operation)
705 #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
706 safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
707               const struct timespec *, uts, size_t, sigsetsize)
708 #endif
709 safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
710               int, flags)
711 #if defined(TARGET_NR_nanosleep)
712 safe_syscall2(int, nanosleep, const struct timespec *, req,
713               struct timespec *, rem)
714 #endif
715 #if defined(TARGET_NR_clock_nanosleep) || \
716     defined(TARGET_NR_clock_nanosleep_time64)
717 safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
718               const struct timespec *, req, struct timespec *, rem)
719 #endif
720 #ifdef __NR_ipc
721 #ifdef __s390x__
722 safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
723               void *, ptr)
724 #else
725 safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
726               void *, ptr, long, fifth)
727 #endif
728 #endif
729 #ifdef __NR_msgsnd
730 safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
731               int, flags)
732 #endif
733 #ifdef __NR_msgrcv
734 safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
735               long, msgtype, int, flags)
736 #endif
737 #ifdef __NR_semtimedop
738 safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
739               unsigned, nsops, const struct timespec *, timeout)
740 #endif
741 #if defined(TARGET_NR_mq_timedsend) || \
742     defined(TARGET_NR_mq_timedsend_time64)
743 safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
744               size_t, len, unsigned, prio, const struct timespec *, timeout)
745 #endif
746 #if defined(TARGET_NR_mq_timedreceive) || \
747     defined(TARGET_NR_mq_timedreceive_time64)
748 safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
749               size_t, len, unsigned *, prio, const struct timespec *, timeout)
750 #endif
751 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
752 safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
753               int, outfd, loff_t *, poutoff, size_t, length,
754               unsigned int, flags)
755 #endif
756 
757 /* We do ioctl like this rather than via safe_syscall3 to preserve the
758  * "third argument might be integer or pointer or not present" behaviour of
759  * the libc function.
760  */
761 #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
762 /* Similarly for fcntl. Note that callers must always:
763  *  pass the F_GETLK64 etc constants rather than the unsuffixed F_GETLK
764  *  use the flock64 struct rather than unsuffixed flock
765  * This will then work and use a 64-bit offset for both 32-bit and 64-bit hosts.
766  */
767 #ifdef __NR_fcntl64
768 #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
769 #else
770 #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
771 #endif
772 
773 static inline int host_to_target_sock_type(int host_type)
774 {
775     int target_type;
776 
777     switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
778     case SOCK_DGRAM:
779         target_type = TARGET_SOCK_DGRAM;
780         break;
781     case SOCK_STREAM:
782         target_type = TARGET_SOCK_STREAM;
783         break;
784     default:
785         target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
786         break;
787     }
788 
789 #if defined(SOCK_CLOEXEC)
790     if (host_type & SOCK_CLOEXEC) {
791         target_type |= TARGET_SOCK_CLOEXEC;
792     }
793 #endif
794 
795 #if defined(SOCK_NONBLOCK)
796     if (host_type & SOCK_NONBLOCK) {
797         target_type |= TARGET_SOCK_NONBLOCK;
798     }
799 #endif
800 
801     return target_type;
802 }
803 
804 static abi_ulong target_brk;
805 static abi_ulong brk_page;
806 
/*
 * Record the guest's initial program break; brk_page tracks the
 * host-page-aligned top of the heap mapping used by do_brk().
 */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = new_brk;
    brk_page = HOST_PAGE_ALIGN(target_brk);
}
812 
/*
 * do_brk() must return target values and target errnos.
 *
 * Grow or shrink the guest heap.  On success the new break is returned;
 * on failure the previous break is returned unchanged (except on Alpha,
 * which expects a real errno).  Uses target_brk/brk_page defined above.
 */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_alloc_size;
    abi_ulong new_brk, new_host_brk_page;

    /* brk pointers are always untagged */

    /* return old brk value if brk_val unchanged or zero */
    if (!brk_val || brk_val == target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    new_host_brk_page = HOST_PAGE_ALIGN(brk_val);

    /* brk_val and old target_brk might be on the same page */
    if (new_brk == TARGET_PAGE_ALIGN(target_brk)) {
        if (brk_val > target_brk) {
            /* empty remaining bytes in (possibly larger) host page */
            memset(g2h_untagged(target_brk), 0, new_host_brk_page - target_brk);
        }
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < target_brk) {
        /* empty remaining bytes in (possibly larger) host page */
        memset(g2h_untagged(brk_val), 0, new_host_brk_page - brk_val);

        /* free unused host pages and set new brk_page */
        target_munmap(new_host_brk_page, brk_page - new_host_brk_page);
        brk_page = new_host_brk_page;

        target_brk = brk_val;
        return target_brk;
    }

    /* We need to allocate more memory after the brk... Note that
     * we don't use MAP_FIXED because that will map over the top of
     * any existing mapping (like the one with the host libc or qemu
     * itself); instead we treat "mapped but at wrong address" as
     * a failure and unmap again.
     */
    new_alloc_size = new_host_brk_page - brk_page;
    if (new_alloc_size) {
        mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
                                        PROT_READ|PROT_WRITE,
                                        MAP_ANON|MAP_PRIVATE, 0, 0));
    } else {
        /* Nothing to map: the existing host pages already cover new_brk. */
        mapped_addr = brk_page;
    }

    if (mapped_addr == brk_page) {
        /* Heap contents are initialized to zero, as for anonymous
         * mapped pages.  Technically the new pages are already
         * initialized to zero since they *are* anonymous mapped
         * pages, however we have to take care with the contents that
         * come from the remaining part of the previous page: it may
         * contain garbage data due to a previous heap usage (grown
         * then shrunken).  */
        memset(g2h_untagged(target_brk), 0, brk_page - target_brk);

        target_brk = brk_val;
        brk_page = new_host_brk_page;
        return target_brk;
    } else if (mapped_addr != -1) {
        /* Mapped but at wrong address, meaning there wasn't actually
         * enough space for this brk.
         */
        target_munmap(mapped_addr, new_alloc_size);
        mapped_addr = -1;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value.  */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
897 
898 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
899     defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
900 static inline abi_long copy_from_user_fdset(fd_set *fds,
901                                             abi_ulong target_fds_addr,
902                                             int n)
903 {
904     int i, nw, j, k;
905     abi_ulong b, *target_fds;
906 
907     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
908     if (!(target_fds = lock_user(VERIFY_READ,
909                                  target_fds_addr,
910                                  sizeof(abi_ulong) * nw,
911                                  1)))
912         return -TARGET_EFAULT;
913 
914     FD_ZERO(fds);
915     k = 0;
916     for (i = 0; i < nw; i++) {
917         /* grab the abi_ulong */
918         __get_user(b, &target_fds[i]);
919         for (j = 0; j < TARGET_ABI_BITS; j++) {
920             /* check the bit inside the abi_ulong */
921             if ((b >> j) & 1)
922                 FD_SET(k, fds);
923             k++;
924         }
925     }
926 
927     unlock_user(target_fds, target_fds_addr, 0);
928 
929     return 0;
930 }
931 
932 static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
933                                                  abi_ulong target_fds_addr,
934                                                  int n)
935 {
936     if (target_fds_addr) {
937         if (copy_from_user_fdset(fds, target_fds_addr, n))
938             return -TARGET_EFAULT;
939         *fds_ptr = fds;
940     } else {
941         *fds_ptr = NULL;
942     }
943     return 0;
944 }
945 
946 static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
947                                           const fd_set *fds,
948                                           int n)
949 {
950     int i, nw, j, k;
951     abi_long v;
952     abi_ulong *target_fds;
953 
954     nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
955     if (!(target_fds = lock_user(VERIFY_WRITE,
956                                  target_fds_addr,
957                                  sizeof(abi_ulong) * nw,
958                                  0)))
959         return -TARGET_EFAULT;
960 
961     k = 0;
962     for (i = 0; i < nw; i++) {
963         v = 0;
964         for (j = 0; j < TARGET_ABI_BITS; j++) {
965             v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
966             k++;
967         }
968         __put_user(v, &target_fds[i]);
969     }
970 
971     unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
972 
973     return 0;
974 }
975 #endif
976 
977 #if defined(__alpha__)
978 #define HOST_HZ 1024
979 #else
980 #define HOST_HZ 100
981 #endif
982 
/* Rescale a clock_t tick count from the host HZ to the guest's HZ. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the intermediate multiply cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
991 
/*
 * Copy a host struct rusage out to guest memory, byte-swapping every
 * field to the guest ABI's long width.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
1021 
1022 #ifdef TARGET_NR_setrlimit
1023 static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
1024 {
1025     abi_ulong target_rlim_swap;
1026     rlim_t result;
1027 
1028     target_rlim_swap = tswapal(target_rlim);
1029     if (target_rlim_swap == TARGET_RLIM_INFINITY)
1030         return RLIM_INFINITY;
1031 
1032     result = target_rlim_swap;
1033     if (target_rlim_swap != (rlim_t)result)
1034         return RLIM_INFINITY;
1035 
1036     return result;
1037 }
1038 #endif
1039 
1040 #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
1041 static inline abi_ulong host_to_target_rlim(rlim_t rlim)
1042 {
1043     abi_ulong target_rlim_swap;
1044     abi_ulong result;
1045 
1046     if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
1047         target_rlim_swap = TARGET_RLIM_INFINITY;
1048     else
1049         target_rlim_swap = rlim;
1050     result = tswapal(target_rlim_swap);
1051 
1052     return result;
1053 }
1054 #endif
1055 
1056 static inline int target_to_host_resource(int code)
1057 {
1058     switch (code) {
1059     case TARGET_RLIMIT_AS:
1060         return RLIMIT_AS;
1061     case TARGET_RLIMIT_CORE:
1062         return RLIMIT_CORE;
1063     case TARGET_RLIMIT_CPU:
1064         return RLIMIT_CPU;
1065     case TARGET_RLIMIT_DATA:
1066         return RLIMIT_DATA;
1067     case TARGET_RLIMIT_FSIZE:
1068         return RLIMIT_FSIZE;
1069     case TARGET_RLIMIT_LOCKS:
1070         return RLIMIT_LOCKS;
1071     case TARGET_RLIMIT_MEMLOCK:
1072         return RLIMIT_MEMLOCK;
1073     case TARGET_RLIMIT_MSGQUEUE:
1074         return RLIMIT_MSGQUEUE;
1075     case TARGET_RLIMIT_NICE:
1076         return RLIMIT_NICE;
1077     case TARGET_RLIMIT_NOFILE:
1078         return RLIMIT_NOFILE;
1079     case TARGET_RLIMIT_NPROC:
1080         return RLIMIT_NPROC;
1081     case TARGET_RLIMIT_RSS:
1082         return RLIMIT_RSS;
1083     case TARGET_RLIMIT_RTPRIO:
1084         return RLIMIT_RTPRIO;
1085 #ifdef RLIMIT_RTTIME
1086     case TARGET_RLIMIT_RTTIME:
1087         return RLIMIT_RTTIME;
1088 #endif
1089     case TARGET_RLIMIT_SIGPENDING:
1090         return RLIMIT_SIGPENDING;
1091     case TARGET_RLIMIT_STACK:
1092         return RLIMIT_STACK;
1093     default:
1094         return code;
1095     }
1096 }
1097 
1098 static inline abi_long copy_from_user_timeval(struct timeval *tv,
1099                                               abi_ulong target_tv_addr)
1100 {
1101     struct target_timeval *target_tv;
1102 
1103     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1104         return -TARGET_EFAULT;
1105     }
1106 
1107     __get_user(tv->tv_sec, &target_tv->tv_sec);
1108     __get_user(tv->tv_usec, &target_tv->tv_usec);
1109 
1110     unlock_user_struct(target_tv, target_tv_addr, 0);
1111 
1112     return 0;
1113 }
1114 
1115 static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
1116                                             const struct timeval *tv)
1117 {
1118     struct target_timeval *target_tv;
1119 
1120     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1121         return -TARGET_EFAULT;
1122     }
1123 
1124     __put_user(tv->tv_sec, &target_tv->tv_sec);
1125     __put_user(tv->tv_usec, &target_tv->tv_usec);
1126 
1127     unlock_user_struct(target_tv, target_tv_addr, 1);
1128 
1129     return 0;
1130 }
1131 
1132 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
1133 static inline abi_long copy_from_user_timeval64(struct timeval *tv,
1134                                                 abi_ulong target_tv_addr)
1135 {
1136     struct target__kernel_sock_timeval *target_tv;
1137 
1138     if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
1139         return -TARGET_EFAULT;
1140     }
1141 
1142     __get_user(tv->tv_sec, &target_tv->tv_sec);
1143     __get_user(tv->tv_usec, &target_tv->tv_usec);
1144 
1145     unlock_user_struct(target_tv, target_tv_addr, 0);
1146 
1147     return 0;
1148 }
1149 #endif
1150 
1151 static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
1152                                               const struct timeval *tv)
1153 {
1154     struct target__kernel_sock_timeval *target_tv;
1155 
1156     if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
1157         return -TARGET_EFAULT;
1158     }
1159 
1160     __put_user(tv->tv_sec, &target_tv->tv_sec);
1161     __put_user(tv->tv_usec, &target_tv->tv_usec);
1162 
1163     unlock_user_struct(target_tv, target_tv_addr, 1);
1164 
1165     return 0;
1166 }
1167 
#if defined(TARGET_NR_futex) || \
    defined(TARGET_NR_rt_sigtimedwait) || \
    defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64) || \
    defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
    defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
    defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
    defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
    defined(TARGET_NR_timer_settime) || \
    (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
/*
 * Read a guest struct timespec into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 *
 * Guard fix: the condition used to test defined(TARGET_NR_pselect6)
 * twice.  do_pselect6() is built when either TARGET_NR_pselect6 or
 * TARGET_NR_pselect6_time64 is defined and calls this helper on its
 * non-time64 path, so a target defining only pselect6_time64 would
 * fail to link this function; the second test must be
 * TARGET_NR_pselect6_time64.
 */
static inline abi_long target_to_host_timespec(struct timespec *host_ts,
                                               abi_ulong target_addr)
{
    struct target_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
1191 
1192 #if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
1193     defined(TARGET_NR_timer_settime64) || \
1194     defined(TARGET_NR_mq_timedsend_time64) || \
1195     defined(TARGET_NR_mq_timedreceive_time64) || \
1196     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
1197     defined(TARGET_NR_clock_nanosleep_time64) || \
1198     defined(TARGET_NR_rt_sigtimedwait_time64) || \
1199     defined(TARGET_NR_utimensat) || \
1200     defined(TARGET_NR_utimensat_time64) || \
1201     defined(TARGET_NR_semtimedop_time64) || \
1202     defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/* Read a guest 64-bit (__kernel_timespec) timespec into *host_ts. */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /*
     * The guest tv_nsec field is 64 bits wide; on a 32-bit target the
     * upper half is padding.  Narrowing through abi_long and widening
     * back discards that padding (sign-extending the low 32 bits).
     */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
1218 #endif
1219 
1220 static inline abi_long host_to_target_timespec(abi_ulong target_addr,
1221                                                struct timespec *host_ts)
1222 {
1223     struct target_timespec *target_ts;
1224 
1225     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1226         return -TARGET_EFAULT;
1227     }
1228     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1229     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1230     unlock_user_struct(target_ts, target_addr, 1);
1231     return 0;
1232 }
1233 
1234 static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
1235                                                  struct timespec *host_ts)
1236 {
1237     struct target__kernel_timespec *target_ts;
1238 
1239     if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
1240         return -TARGET_EFAULT;
1241     }
1242     __put_user(host_ts->tv_sec, &target_ts->tv_sec);
1243     __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
1244     unlock_user_struct(target_ts, target_addr, 1);
1245     return 0;
1246 }
1247 
1248 #if defined(TARGET_NR_gettimeofday)
/* Write *tz out to guest memory; 0 or -TARGET_EFAULT. */
static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
                                             struct timezone *tz)
{
    struct target_timezone *target_tz;

    /*
     * NOTE(review): the copy-on-lock flag here is 1 although the struct
     * is only written; the other copy_to_user_* helpers in this file
     * pass 0.  Presumably harmless (extra copy-in) -- confirm.
     */
    if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
    __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);

    unlock_user_struct(target_tz, target_tz_addr, 1);

    return 0;
}
1265 #endif
1266 
1267 #if defined(TARGET_NR_settimeofday)
1268 static inline abi_long copy_from_user_timezone(struct timezone *tz,
1269                                                abi_ulong target_tz_addr)
1270 {
1271     struct target_timezone *target_tz;
1272 
1273     if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
1274         return -TARGET_EFAULT;
1275     }
1276 
1277     __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
1278     __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
1279 
1280     unlock_user_struct(target_tz, target_tz_addr, 0);
1281 
1282     return 0;
1283 }
1284 #endif
1285 
1286 #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
1287 #include <mqueue.h>
1288 
1289 static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
1290                                               abi_ulong target_mq_attr_addr)
1291 {
1292     struct target_mq_attr *target_mq_attr;
1293 
1294     if (!lock_user_struct(VERIFY_READ, target_mq_attr,
1295                           target_mq_attr_addr, 1))
1296         return -TARGET_EFAULT;
1297 
1298     __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
1299     __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1300     __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1301     __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1302 
1303     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
1304 
1305     return 0;
1306 }
1307 
1308 static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
1309                                             const struct mq_attr *attr)
1310 {
1311     struct target_mq_attr *target_mq_attr;
1312 
1313     if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
1314                           target_mq_attr_addr, 0))
1315         return -TARGET_EFAULT;
1316 
1317     __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
1318     __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
1319     __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
1320     __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
1321 
1322     unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
1323 
1324     return 0;
1325 }
1326 #endif
1327 
1328 #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/*
 * do_select() must return target values and target errnos.
 *
 * Implements classic select(2) on top of the host pselect6: the guest
 * timeval is converted to a timespec for the call and converted back
 * (with the remaining time) afterwards.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Pull in the three fd sets; a NULL guest pointer yields a NULL set. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        /* Copy the result sets and the remaining timeout back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
1385 
1386 #if defined(TARGET_WANT_OLD_SYS_SELECT)
1387 static abi_long do_old_select(abi_ulong arg1)
1388 {
1389     struct target_sel_arg_struct *sel;
1390     abi_ulong inp, outp, exp, tvp;
1391     long nsel;
1392 
1393     if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
1394         return -TARGET_EFAULT;
1395     }
1396 
1397     nsel = tswapal(sel->n);
1398     inp = tswapal(sel->inp);
1399     outp = tswapal(sel->outp);
1400     exp = tswapal(sel->exp);
1401     tvp = tswapal(sel->tvp);
1402 
1403     unlock_user_struct(sel, arg1, 0);
1404 
1405     return do_select(nsel, inp, outp, exp, tvp);
1406 }
1407 #endif
1408 #endif
1409 
1410 #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Implement pselect6 / pselect6_time64 for the guest.  arg1..arg5 are
 * nfds and the guest addresses of the three fd sets and the timespec;
 * arg6 points to the kernel's packed { sigset *, size } pair.  time64
 * selects the 64-bit timespec layout.  Returns target values/errnos.
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Pull in the three fd sets; a NULL guest pointer yields a NULL set. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
            ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* Block the guest's mask for the duration of the call. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    /* Restore the signal mask that process_sigsuspend_mask() saved. */
    if (sig_ptr) {
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        /* Copy the result sets and the remaining timeout back out. */
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
1520 #endif
1521 
1522 #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
1523     defined(TARGET_NR_ppoll_time64)
1524 static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
1525                          abi_long arg4, abi_long arg5, bool ppoll, bool time64)
1526 {
1527     struct target_pollfd *target_pfd;
1528     unsigned int nfds = arg2;
1529     struct pollfd *pfd;
1530     unsigned int i;
1531     abi_long ret;
1532 
1533     pfd = NULL;
1534     target_pfd = NULL;
1535     if (nfds) {
1536         if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
1537             return -TARGET_EINVAL;
1538         }
1539         target_pfd = lock_user(VERIFY_WRITE, arg1,
1540                                sizeof(struct target_pollfd) * nfds, 1);
1541         if (!target_pfd) {
1542             return -TARGET_EFAULT;
1543         }
1544 
1545         pfd = alloca(sizeof(struct pollfd) * nfds);
1546         for (i = 0; i < nfds; i++) {
1547             pfd[i].fd = tswap32(target_pfd[i].fd);
1548             pfd[i].events = tswap16(target_pfd[i].events);
1549         }
1550     }
1551     if (ppoll) {
1552         struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
1553         sigset_t *set = NULL;
1554 
1555         if (arg3) {
1556             if (time64) {
1557                 if (target_to_host_timespec64(timeout_ts, arg3)) {
1558                     unlock_user(target_pfd, arg1, 0);
1559                     return -TARGET_EFAULT;
1560                 }
1561             } else {
1562                 if (target_to_host_timespec(timeout_ts, arg3)) {
1563                     unlock_user(target_pfd, arg1, 0);
1564                     return -TARGET_EFAULT;
1565                 }
1566             }
1567         } else {
1568             timeout_ts = NULL;
1569         }
1570 
1571         if (arg4) {
1572             ret = process_sigsuspend_mask(&set, arg4, arg5);
1573             if (ret != 0) {
1574                 unlock_user(target_pfd, arg1, 0);
1575                 return ret;
1576             }
1577         }
1578 
1579         ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
1580                                    set, SIGSET_T_SIZE));
1581 
1582         if (set) {
1583             finish_sigsuspend_mask(ret);
1584         }
1585         if (!is_error(ret) && arg3) {
1586             if (time64) {
1587                 if (host_to_target_timespec64(arg3, timeout_ts)) {
1588                     return -TARGET_EFAULT;
1589                 }
1590             } else {
1591                 if (host_to_target_timespec(arg3, timeout_ts)) {
1592                     return -TARGET_EFAULT;
1593                 }
1594             }
1595         }
1596     } else {
1597           struct timespec ts, *pts;
1598 
1599           if (arg3 >= 0) {
1600               /* Convert ms to secs, ns */
1601               ts.tv_sec = arg3 / 1000;
1602               ts.tv_nsec = (arg3 % 1000) * 1000000LL;
1603               pts = &ts;
1604           } else {
1605               /* -ve poll() timeout means "infinite" */
1606               pts = NULL;
1607           }
1608           ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
1609     }
1610 
1611     if (!is_error(ret)) {
1612         for (i = 0; i < nfds; i++) {
1613             target_pfd[i].revents = tswap16(pfd[i].revents);
1614         }
1615     }
1616     unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
1617     return ret;
1618 }
1619 #endif
1620 
/*
 * Create a pipe for the guest.  pipe2() always writes the two fds to
 * the guest pipedes buffer; the original pipe() syscall additionally
 * has per-target register conventions handled below.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall.  */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        /* Second fd in a register; the first is the normal return value. */
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
1654 
1655 static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
1656                                               abi_ulong target_addr,
1657                                               socklen_t len)
1658 {
1659     struct target_ip_mreqn *target_smreqn;
1660 
1661     target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
1662     if (!target_smreqn)
1663         return -TARGET_EFAULT;
1664     mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
1665     mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
1666     if (len == sizeof(struct target_ip_mreqn))
1667         mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
1668     unlock_user(target_smreqn, target_addr, 0);
1669 
1670     return 0;
1671 }
1672 
/*
 * Convert a guest sockaddr into the host representation.  Handles the
 * per-family byte-swapped fields and the common AF_UNIX "forgot the
 * trailing NUL" length bug.  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    /* Some fd types (registered via fd_trans) have their own converter. */
    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */

    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Extend len by one if it cuts the path just before its NUL. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        /*
         * NOTE(review): the host buffer is viewed through the *target*
         * sockaddr_ll layout here -- presumably the layouts match for
         * these fields; confirm.
         */
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
1734 
1735 static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
1736                                                struct sockaddr *addr,
1737                                                socklen_t len)
1738 {
1739     struct target_sockaddr *target_saddr;
1740 
1741     if (len == 0) {
1742         return 0;
1743     }
1744     assert(addr);
1745 
1746     target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
1747     if (!target_saddr)
1748         return -TARGET_EFAULT;
1749     memcpy(target_saddr, addr, len);
1750     if (len >= offsetof(struct target_sockaddr, sa_family) +
1751         sizeof(target_saddr->sa_family)) {
1752         target_saddr->sa_family = tswap16(addr->sa_family);
1753     }
1754     if (addr->sa_family == AF_NETLINK &&
1755         len >= sizeof(struct target_sockaddr_nl)) {
1756         struct target_sockaddr_nl *target_nl =
1757                (struct target_sockaddr_nl *)target_saddr;
1758         target_nl->nl_pid = tswap32(target_nl->nl_pid);
1759         target_nl->nl_groups = tswap32(target_nl->nl_groups);
1760     } else if (addr->sa_family == AF_PACKET) {
1761         struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
1762         target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
1763         target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
1764     } else if (addr->sa_family == AF_INET6 &&
1765                len >= sizeof(struct target_sockaddr_in6)) {
1766         struct target_sockaddr_in6 *target_in6 =
1767                (struct target_sockaddr_in6 *)target_saddr;
1768         target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
1769     }
1770     unlock_user(target_saddr, target_addr, len);
1771 
1772     return 0;
1773 }
1774 
/*
 * Convert the ancillary-data (control message) area of a guest msghdr
 * into host format, writing the result into the host buffer already
 * attached to msgh->msg_control.
 *
 * Walks the guest TARGET_CMSG_* chain and the host CMSG_* chain in
 * lockstep, converting each header and the payload types we know about
 * (SCM_RIGHTS fd arrays, SCM_CREDENTIALS, SOL_ALG).  Unknown payloads
 * are copied verbatim with a LOG_UNIMP diagnostic.
 *
 * On return msgh->msg_controllen holds the number of host bytes
 * actually filled in.  Returns 0 on success, -TARGET_EFAULT if the
 * guest control buffer cannot be locked.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    /* Guest buffer too small for even one header: nothing to convert. */
    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Payload length as declared by the guest header. */
        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        /* SOL_SOCKET may differ numerically between target and host. */
        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Array of file descriptors: swap each 32-bit fd. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
               &&  cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            /* ALG_SET_IV-style payload: only the leading length word is
             * byte-order sensitive; the rest is raw key/IV material. */
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianess of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            /* Unknown payload: pass it through unmodified. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
1868 
/*
 * Convert the ancillary-data (control message) area of a host msghdr
 * back into guest format, writing into the guest buffer named by
 * target_msgh->msg_control.
 *
 * Mirrors the kernel's put_cmsg(): whole headers are never truncated
 * but payloads may be, in which case MSG_CTRUNC is set in the guest
 * msg_flags.  Payload types we know how to convert are handled
 * explicitly; anything else is copied raw (zero-padded if the target
 * representation is larger) with a LOG_UNIMP diagnostic.
 *
 * On return target_msgh->msg_controllen holds the number of guest
 * bytes written (target byte order).  Returns 0 on success,
 * -TARGET_EFAULT if the guest control buffer cannot be locked.
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        /* Host payload length for this control message. */
        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        /* SOL_SOCKET may differ numerically between host and target. */
        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                /* struct timeval layout differs between host and target */
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        /* Truncate the payload (not the header) if the guest buffer is
         * too small, and tell the guest via MSG_CTRUNC. */
        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Passed file descriptors: swap each 32-bit fd. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                /* sock_extended_err followed by the offending address,
                 * as the kernel lays it out for MSG_ERRQUEUE reads. */
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                /* IPv6 flavour of the IP_RECVERR layout above. */
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            /* Unknown payload: raw copy, zero-padding any extra target
             * space so we never leak uninitialised bytes to the guest. */
            qemu_log_mask(LOG_UNIMP, "Unsupported ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        /* Account for the space consumed in the guest buffer; the last
         * message may occupy less than a full TARGET_CMSG_SPACE. */
        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
2100 
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulates setsockopt(2) for the guest: translates the target-level
 * option constants and option-value layouts into their host
 * equivalents, then issues the host syscall.  Options whose values are
 * plain ints are read with get_user_u32; structured values get
 * dedicated conversion.  Unknown level/option pairs log LOG_UNIMP and
 * return -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;
    struct ip_mreqn *ip_mreq;
    struct ip_mreq_source *ip_mreq_source;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value.  */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* These accept either an int or a single byte from the
             * guest (ip(7) allows both for historical reasons). */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
            /* Accepts either ip_mreq or the larger ip_mreqn. */
            if (optlen < sizeof (struct target_ip_mreq) ||
                optlen > sizeof (struct target_ip_mreqn))
                return -TARGET_EINVAL;

            ip_mreq = (struct ip_mreqn *) alloca(optlen);
            target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
            break;

        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
            /* ip_mreq_source is all network-byte-order addresses, so it
             * can be passed through without field swapping. */
            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;

        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            /* Plain-int IPv6 options. */
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Interface index is host-endian in the kernel ABI. */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index needs byte-swapping; the
             * multicast address is already network byte order. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            /* icmp6_filter is 8 x 32-bit words of bitmap. */
            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }

            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;

        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key material is opaque bytes: copy straight through. */
            char *alg_key = g_malloc(optlen);

            if (!alg_key) {
                return -TARGET_ENOMEM;
            }
            if (copy_from_user(alg_key, optval_addr, optlen)) {
                g_free(alg_key);
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            g_free(alg_key);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* The auth size is carried in optlen itself; no value. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        {
                struct timeval tv;

                optname = SO_RCVTIMEO;

set_timeout:
                /* Shared with TARGET_SO_SNDTIMEO below: convert the
                 * target timeval before handing it to the host. */
                if (optlen != sizeof(struct target_timeval)) {
                    return -TARGET_EINVAL;
                }

                if (copy_from_user_timeval(&tv, optval_addr)) {
                    return -TARGET_EFAULT;
                }

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                &tv, sizeof(tv)));
                return ret;
        }
        case TARGET_SO_SNDTIMEO:
                optname = SO_SNDTIMEO;
                goto set_timeout;
        case TARGET_SO_ATTACH_FILTER:
        {
                /* Rebuild the BPF program with host-endian fields. */
                struct target_sock_fprog *tfprog;
                struct target_sock_filter *tfilter;
                struct sock_fprog fprog;
                struct sock_filter *filter;
                int i;

                if (optlen != sizeof(*tfprog)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                    return -TARGET_EFAULT;
                }
                if (!lock_user_struct(VERIFY_READ, tfilter,
                                      tswapal(tfprog->filter), 0)) {
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_EFAULT;
                }

                fprog.len = tswap16(tfprog->len);
                filter = g_try_new(struct sock_filter, fprog.len);
                if (filter == NULL) {
                    unlock_user_struct(tfilter, tfprog->filter, 1);
                    unlock_user_struct(tfprog, optval_addr, 1);
                    return -TARGET_ENOMEM;
                }
                for (i = 0; i < fprog.len; i++) {
                    filter[i].code = tswap16(tfilter[i].code);
                    filter[i].jt = tfilter[i].jt;
                    filter[i].jf = tfilter[i].jf;
                    filter[i].k = tswap32(tfilter[i].k);
                }
                fprog.filter = filter;

                ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                                SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
                g_free(filter);

                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return ret;
        }
	case TARGET_SO_BINDTODEVICE:
	{
		char *dev_ifname, *addr_ifname;

		/* Clamp to IFNAMSIZ-1 so we can NUL-terminate locally. */
		if (optlen > IFNAMSIZ - 1) {
		    optlen = IFNAMSIZ - 1;
		}
		dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
		if (!dev_ifname) {
		    return -TARGET_EFAULT;
		}
		optname = SO_BINDTODEVICE;
		addr_ifname = alloca(IFNAMSIZ);
		memcpy(addr_ifname, dev_ifname, optlen);
		addr_ifname[optlen] = 0;
		ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                           addr_ifname, optlen));
		unlock_user (dev_ifname, optval_addr, 0);
		return ret;
	}
        case TARGET_SO_LINGER:
        {
                struct linger lg;
                struct target_linger *tlg;

                if (optlen != sizeof(struct target_linger)) {
                    return -TARGET_EINVAL;
                }
                if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                    return -TARGET_EFAULT;
                }
                __get_user(lg.l_onoff, &tlg->l_onoff);
                __get_user(lg.l_linger, &tlg->l_linger);
                ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                                &lg, sizeof(lg)));
                unlock_user_struct(tlg, optval_addr, 0);
                return ret;
        }
            /* Options with 'int' argument.  */
        case TARGET_SO_DEBUG:
		optname = SO_DEBUG;
		break;
        case TARGET_SO_REUSEADDR:
		optname = SO_REUSEADDR;
		break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
                optname = SO_REUSEPORT;
                break;
#endif
        case TARGET_SO_TYPE:
		optname = SO_TYPE;
		break;
        case TARGET_SO_ERROR:
		optname = SO_ERROR;
		break;
        case TARGET_SO_DONTROUTE:
		optname = SO_DONTROUTE;
		break;
        case TARGET_SO_BROADCAST:
		optname = SO_BROADCAST;
		break;
        case TARGET_SO_SNDBUF:
		optname = SO_SNDBUF;
		break;
        case TARGET_SO_SNDBUFFORCE:
                optname = SO_SNDBUFFORCE;
                break;
        case TARGET_SO_RCVBUF:
		optname = SO_RCVBUF;
		break;
        case TARGET_SO_RCVBUFFORCE:
                optname = SO_RCVBUFFORCE;
                break;
        case TARGET_SO_KEEPALIVE:
		optname = SO_KEEPALIVE;
		break;
        case TARGET_SO_OOBINLINE:
		optname = SO_OOBINLINE;
		break;
        case TARGET_SO_NO_CHECK:
		optname = SO_NO_CHECK;
		break;
        case TARGET_SO_PRIORITY:
		optname = SO_PRIORITY;
		break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
		optname = SO_BSDCOMPAT;
		break;
#endif
        case TARGET_SO_PASSCRED:
		optname = SO_PASSCRED;
		break;
        case TARGET_SO_PASSSEC:
                optname = SO_PASSSEC;
                break;
        case TARGET_SO_TIMESTAMP:
		optname = SO_TIMESTAMP;
		break;
        case TARGET_SO_RCVLOWAT:
		optname = SO_RCVLOWAT;
		break;
        default:
            goto unimplemented;
        }
	/* Common path for all plain-int SOL_SOCKET options above. */
	if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;

	if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
	ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported netlink options take a plain int. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
2563 
2564 /* do_getsockopt() Must return target values and target errnos. */
2565 static abi_long do_getsockopt(int sockfd, int level, int optname,
2566                               abi_ulong optval_addr, abi_ulong optlen)
2567 {
2568     abi_long ret;
2569     int len, val;
2570     socklen_t lv;
2571 
2572     switch(level) {
2573     case TARGET_SOL_SOCKET:
2574         level = SOL_SOCKET;
2575         switch (optname) {
2576         /* These don't just return a single integer */
2577         case TARGET_SO_PEERNAME:
2578             goto unimplemented;
2579         case TARGET_SO_RCVTIMEO: {
2580             struct timeval tv;
2581             socklen_t tvlen;
2582 
2583             optname = SO_RCVTIMEO;
2584 
2585 get_timeout:
2586             if (get_user_u32(len, optlen)) {
2587                 return -TARGET_EFAULT;
2588             }
2589             if (len < 0) {
2590                 return -TARGET_EINVAL;
2591             }
2592 
2593             tvlen = sizeof(tv);
2594             ret = get_errno(getsockopt(sockfd, level, optname,
2595                                        &tv, &tvlen));
2596             if (ret < 0) {
2597                 return ret;
2598             }
2599             if (len > sizeof(struct target_timeval)) {
2600                 len = sizeof(struct target_timeval);
2601             }
2602             if (copy_to_user_timeval(optval_addr, &tv)) {
2603                 return -TARGET_EFAULT;
2604             }
2605             if (put_user_u32(len, optlen)) {
2606                 return -TARGET_EFAULT;
2607             }
2608             break;
2609         }
2610         case TARGET_SO_SNDTIMEO:
2611             optname = SO_SNDTIMEO;
2612             goto get_timeout;
2613         case TARGET_SO_PEERCRED: {
2614             struct ucred cr;
2615             socklen_t crlen;
2616             struct target_ucred *tcr;
2617 
2618             if (get_user_u32(len, optlen)) {
2619                 return -TARGET_EFAULT;
2620             }
2621             if (len < 0) {
2622                 return -TARGET_EINVAL;
2623             }
2624 
2625             crlen = sizeof(cr);
2626             ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
2627                                        &cr, &crlen));
2628             if (ret < 0) {
2629                 return ret;
2630             }
2631             if (len > crlen) {
2632                 len = crlen;
2633             }
2634             if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
2635                 return -TARGET_EFAULT;
2636             }
2637             __put_user(cr.pid, &tcr->pid);
2638             __put_user(cr.uid, &tcr->uid);
2639             __put_user(cr.gid, &tcr->gid);
2640             unlock_user_struct(tcr, optval_addr, 1);
2641             if (put_user_u32(len, optlen)) {
2642                 return -TARGET_EFAULT;
2643             }
2644             break;
2645         }
2646         case TARGET_SO_PEERSEC: {
2647             char *name;
2648 
2649             if (get_user_u32(len, optlen)) {
2650                 return -TARGET_EFAULT;
2651             }
2652             if (len < 0) {
2653                 return -TARGET_EINVAL;
2654             }
2655             name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
2656             if (!name) {
2657                 return -TARGET_EFAULT;
2658             }
2659             lv = len;
2660             ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
2661                                        name, &lv));
2662             if (put_user_u32(lv, optlen)) {
2663                 ret = -TARGET_EFAULT;
2664             }
2665             unlock_user(name, optval_addr, lv);
2666             break;
2667         }
2668         case TARGET_SO_LINGER:
2669         {
2670             struct linger lg;
2671             socklen_t lglen;
2672             struct target_linger *tlg;
2673 
2674             if (get_user_u32(len, optlen)) {
2675                 return -TARGET_EFAULT;
2676             }
2677             if (len < 0) {
2678                 return -TARGET_EINVAL;
2679             }
2680 
2681             lglen = sizeof(lg);
2682             ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
2683                                        &lg, &lglen));
2684             if (ret < 0) {
2685                 return ret;
2686             }
2687             if (len > lglen) {
2688                 len = lglen;
2689             }
2690             if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
2691                 return -TARGET_EFAULT;
2692             }
2693             __put_user(lg.l_onoff, &tlg->l_onoff);
2694             __put_user(lg.l_linger, &tlg->l_linger);
2695             unlock_user_struct(tlg, optval_addr, 1);
2696             if (put_user_u32(len, optlen)) {
2697                 return -TARGET_EFAULT;
2698             }
2699             break;
2700         }
2701         /* Options with 'int' argument.  */
2702         case TARGET_SO_DEBUG:
2703             optname = SO_DEBUG;
2704             goto int_case;
2705         case TARGET_SO_REUSEADDR:
2706             optname = SO_REUSEADDR;
2707             goto int_case;
2708 #ifdef SO_REUSEPORT
2709         case TARGET_SO_REUSEPORT:
2710             optname = SO_REUSEPORT;
2711             goto int_case;
2712 #endif
2713         case TARGET_SO_TYPE:
2714             optname = SO_TYPE;
2715             goto int_case;
2716         case TARGET_SO_ERROR:
2717             optname = SO_ERROR;
2718             goto int_case;
2719         case TARGET_SO_DONTROUTE:
2720             optname = SO_DONTROUTE;
2721             goto int_case;
2722         case TARGET_SO_BROADCAST:
2723             optname = SO_BROADCAST;
2724             goto int_case;
2725         case TARGET_SO_SNDBUF:
2726             optname = SO_SNDBUF;
2727             goto int_case;
2728         case TARGET_SO_RCVBUF:
2729             optname = SO_RCVBUF;
2730             goto int_case;
2731         case TARGET_SO_KEEPALIVE:
2732             optname = SO_KEEPALIVE;
2733             goto int_case;
2734         case TARGET_SO_OOBINLINE:
2735             optname = SO_OOBINLINE;
2736             goto int_case;
2737         case TARGET_SO_NO_CHECK:
2738             optname = SO_NO_CHECK;
2739             goto int_case;
2740         case TARGET_SO_PRIORITY:
2741             optname = SO_PRIORITY;
2742             goto int_case;
2743 #ifdef SO_BSDCOMPAT
2744         case TARGET_SO_BSDCOMPAT:
2745             optname = SO_BSDCOMPAT;
2746             goto int_case;
2747 #endif
2748         case TARGET_SO_PASSCRED:
2749             optname = SO_PASSCRED;
2750             goto int_case;
2751         case TARGET_SO_TIMESTAMP:
2752             optname = SO_TIMESTAMP;
2753             goto int_case;
2754         case TARGET_SO_RCVLOWAT:
2755             optname = SO_RCVLOWAT;
2756             goto int_case;
2757         case TARGET_SO_ACCEPTCONN:
2758             optname = SO_ACCEPTCONN;
2759             goto int_case;
2760         case TARGET_SO_PROTOCOL:
2761             optname = SO_PROTOCOL;
2762             goto int_case;
2763         case TARGET_SO_DOMAIN:
2764             optname = SO_DOMAIN;
2765             goto int_case;
2766         default:
2767             goto int_case;
2768         }
2769         break;
2770     case SOL_TCP:
2771     case SOL_UDP:
2772         /* TCP and UDP options all take an 'int' value.  */
2773     int_case:
2774         if (get_user_u32(len, optlen))
2775             return -TARGET_EFAULT;
2776         if (len < 0)
2777             return -TARGET_EINVAL;
2778         lv = sizeof(lv);
2779         ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2780         if (ret < 0)
2781             return ret;
2782         switch (optname) {
2783         case SO_TYPE:
2784             val = host_to_target_sock_type(val);
2785             break;
2786         case SO_ERROR:
2787             val = host_to_target_errno(val);
2788             break;
2789         }
2790         if (len > lv)
2791             len = lv;
2792         if (len == 4) {
2793             if (put_user_u32(val, optval_addr))
2794                 return -TARGET_EFAULT;
2795         } else {
2796             if (put_user_u8(val, optval_addr))
2797                 return -TARGET_EFAULT;
2798         }
2799         if (put_user_u32(len, optlen))
2800             return -TARGET_EFAULT;
2801         break;
2802     case SOL_IP:
2803         switch(optname) {
2804         case IP_TOS:
2805         case IP_TTL:
2806         case IP_HDRINCL:
2807         case IP_ROUTER_ALERT:
2808         case IP_RECVOPTS:
2809         case IP_RETOPTS:
2810         case IP_PKTINFO:
2811         case IP_MTU_DISCOVER:
2812         case IP_RECVERR:
2813         case IP_RECVTOS:
2814 #ifdef IP_FREEBIND
2815         case IP_FREEBIND:
2816 #endif
2817         case IP_MULTICAST_TTL:
2818         case IP_MULTICAST_LOOP:
2819             if (get_user_u32(len, optlen))
2820                 return -TARGET_EFAULT;
2821             if (len < 0)
2822                 return -TARGET_EINVAL;
2823             lv = sizeof(lv);
2824             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2825             if (ret < 0)
2826                 return ret;
2827             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2828                 len = 1;
2829                 if (put_user_u32(len, optlen)
2830                     || put_user_u8(val, optval_addr))
2831                     return -TARGET_EFAULT;
2832             } else {
2833                 if (len > sizeof(int))
2834                     len = sizeof(int);
2835                 if (put_user_u32(len, optlen)
2836                     || put_user_u32(val, optval_addr))
2837                     return -TARGET_EFAULT;
2838             }
2839             break;
2840         default:
2841             ret = -TARGET_ENOPROTOOPT;
2842             break;
2843         }
2844         break;
2845     case SOL_IPV6:
2846         switch (optname) {
2847         case IPV6_MTU_DISCOVER:
2848         case IPV6_MTU:
2849         case IPV6_V6ONLY:
2850         case IPV6_RECVPKTINFO:
2851         case IPV6_UNICAST_HOPS:
2852         case IPV6_MULTICAST_HOPS:
2853         case IPV6_MULTICAST_LOOP:
2854         case IPV6_RECVERR:
2855         case IPV6_RECVHOPLIMIT:
2856         case IPV6_2292HOPLIMIT:
2857         case IPV6_CHECKSUM:
2858         case IPV6_ADDRFORM:
2859         case IPV6_2292PKTINFO:
2860         case IPV6_RECVTCLASS:
2861         case IPV6_RECVRTHDR:
2862         case IPV6_2292RTHDR:
2863         case IPV6_RECVHOPOPTS:
2864         case IPV6_2292HOPOPTS:
2865         case IPV6_RECVDSTOPTS:
2866         case IPV6_2292DSTOPTS:
2867         case IPV6_TCLASS:
2868         case IPV6_ADDR_PREFERENCES:
2869 #ifdef IPV6_RECVPATHMTU
2870         case IPV6_RECVPATHMTU:
2871 #endif
2872 #ifdef IPV6_TRANSPARENT
2873         case IPV6_TRANSPARENT:
2874 #endif
2875 #ifdef IPV6_FREEBIND
2876         case IPV6_FREEBIND:
2877 #endif
2878 #ifdef IPV6_RECVORIGDSTADDR
2879         case IPV6_RECVORIGDSTADDR:
2880 #endif
2881             if (get_user_u32(len, optlen))
2882                 return -TARGET_EFAULT;
2883             if (len < 0)
2884                 return -TARGET_EINVAL;
2885             lv = sizeof(lv);
2886             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2887             if (ret < 0)
2888                 return ret;
2889             if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
2890                 len = 1;
2891                 if (put_user_u32(len, optlen)
2892                     || put_user_u8(val, optval_addr))
2893                     return -TARGET_EFAULT;
2894             } else {
2895                 if (len > sizeof(int))
2896                     len = sizeof(int);
2897                 if (put_user_u32(len, optlen)
2898                     || put_user_u32(val, optval_addr))
2899                     return -TARGET_EFAULT;
2900             }
2901             break;
2902         default:
2903             ret = -TARGET_ENOPROTOOPT;
2904             break;
2905         }
2906         break;
2907 #ifdef SOL_NETLINK
2908     case SOL_NETLINK:
2909         switch (optname) {
2910         case NETLINK_PKTINFO:
2911         case NETLINK_BROADCAST_ERROR:
2912         case NETLINK_NO_ENOBUFS:
2913 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2914         case NETLINK_LISTEN_ALL_NSID:
2915         case NETLINK_CAP_ACK:
2916 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2917 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
2918         case NETLINK_EXT_ACK:
2919 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2920 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
2921         case NETLINK_GET_STRICT_CHK:
2922 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
2923             if (get_user_u32(len, optlen)) {
2924                 return -TARGET_EFAULT;
2925             }
2926             if (len != sizeof(val)) {
2927                 return -TARGET_EINVAL;
2928             }
2929             lv = len;
2930             ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
2931             if (ret < 0) {
2932                 return ret;
2933             }
2934             if (put_user_u32(lv, optlen)
2935                 || put_user_u32(val, optval_addr)) {
2936                 return -TARGET_EFAULT;
2937             }
2938             break;
2939 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
2940         case NETLINK_LIST_MEMBERSHIPS:
2941         {
2942             uint32_t *results;
2943             int i;
2944             if (get_user_u32(len, optlen)) {
2945                 return -TARGET_EFAULT;
2946             }
2947             if (len < 0) {
2948                 return -TARGET_EINVAL;
2949             }
2950             results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
2951             if (!results && len > 0) {
2952                 return -TARGET_EFAULT;
2953             }
2954             lv = len;
2955             ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
2956             if (ret < 0) {
2957                 unlock_user(results, optval_addr, 0);
2958                 return ret;
2959             }
2960             /* swap host endianess to target endianess. */
2961             for (i = 0; i < (len / sizeof(uint32_t)); i++) {
2962                 results[i] = tswap32(results[i]);
2963             }
2964             if (put_user_u32(lv, optlen)) {
2965                 return -TARGET_EFAULT;
2966             }
2967             unlock_user(results, optval_addr, 0);
2968             break;
2969         }
2970 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
2971         default:
2972             goto unimplemented;
2973         }
2974         break;
2975 #endif /* SOL_NETLINK */
2976     default:
2977     unimplemented:
2978         qemu_log_mask(LOG_UNIMP,
2979                       "getsockopt level=%d optname=%d not yet supported\n",
2980                       level, optname);
2981         ret = -TARGET_EOPNOTSUPP;
2982         break;
2983     }
2984     return ret;
2985 }
2986 
/* Convert target low/high pair representing file offset into the host
 * low/high pair. This function doesn't handle offsets bigger than 64 bits
 * as the kernel doesn't handle them either.
 */
static void target_to_host_low_high(abi_ulong tlow,
                                    abi_ulong thigh,
                                    unsigned long *hlow,
                                    unsigned long *hhigh)
{
    /* The shift is split in two so no single shift reaches the operand
     * width (which would be undefined behaviour).  On a 64-bit target the
     * two half-width shifts push thigh out entirely, which is correct:
     * tlow already carries the whole 64-bit offset there.
     */
    uint64_t off = tlow |
        ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
        TARGET_LONG_BITS / 2;

    *hlow = off;
    /* Same split-shift trick on the host side: on a 64-bit host this
     * yields 0, as the full offset fits in *hlow. */
    *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
}
3003 
/*
 * Lock a guest iovec array, and the buffers it points at, into host
 * memory, producing a host struct iovec array for readv/writev-style
 * syscalls.
 *
 * type:        VERIFY_READ or VERIFY_WRITE - direction of the host access
 * target_addr: guest address of the struct target_iovec array
 * count:       number of entries
 * copy:        non-zero to copy guest buffer contents in while locking
 *
 * Returns the host vector, to be released with unlock_iovec().  On
 * failure returns NULL with errno set; a count of zero returns NULL with
 * errno cleared to 0 so callers can distinguish it from an error.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored.  */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp each entry so the running total never exceeds
             * max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: release only the entries that were locked before the
     * failure (entry i itself was not successfully locked). */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
3091 
3092 static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
3093                          abi_ulong count, int copy)
3094 {
3095     struct target_iovec *target_vec;
3096     int i;
3097 
3098     target_vec = lock_user(VERIFY_READ, target_addr,
3099                            count * sizeof(struct target_iovec), 1);
3100     if (target_vec) {
3101         for (i = 0; i < count; i++) {
3102             abi_ulong base = tswapal(target_vec[i].iov_base);
3103             abi_long len = tswapal(target_vec[i].iov_len);
3104             if (len < 0) {
3105                 break;
3106             }
3107             unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
3108         }
3109         unlock_user(target_vec, target_addr, 0);
3110     }
3111 
3112     g_free(vec);
3113 }
3114 
3115 static inline int target_to_host_sock_type(int *type)
3116 {
3117     int host_type = 0;
3118     int target_type = *type;
3119 
3120     switch (target_type & TARGET_SOCK_TYPE_MASK) {
3121     case TARGET_SOCK_DGRAM:
3122         host_type = SOCK_DGRAM;
3123         break;
3124     case TARGET_SOCK_STREAM:
3125         host_type = SOCK_STREAM;
3126         break;
3127     default:
3128         host_type = target_type & TARGET_SOCK_TYPE_MASK;
3129         break;
3130     }
3131     if (target_type & TARGET_SOCK_CLOEXEC) {
3132 #if defined(SOCK_CLOEXEC)
3133         host_type |= SOCK_CLOEXEC;
3134 #else
3135         return -TARGET_EINVAL;
3136 #endif
3137     }
3138     if (target_type & TARGET_SOCK_NONBLOCK) {
3139 #if defined(SOCK_NONBLOCK)
3140         host_type |= SOCK_NONBLOCK;
3141 #elif !defined(O_NONBLOCK)
3142         return -TARGET_EINVAL;
3143 #endif
3144     }
3145     *type = host_type;
3146     return 0;
3147 }
3148 
/* Try to emulate socket type flags after socket creation.
 *
 * On hosts without SOCK_NONBLOCK, approximate it by setting O_NONBLOCK on
 * the freshly created socket.  Returns the fd on success; on failure the
 * fd is closed and -TARGET_EINVAL is returned so the caller never sees a
 * socket that violates the requested semantics.
 */
static int sock_flags_fixup(int fd, int target_type)
{
#if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
    if (target_type & TARGET_SOCK_NONBLOCK) {
        int flags = fcntl(fd, F_GETFL);
        /* Bug fix: F_GETFL can fail and return -1; OR-ing -1 into F_SETFL
         * would set every status flag bit.  Treat it as an error. */
        if (flags == -1 || fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) {
            close(fd);
            return -TARGET_EINVAL;
        }
    }
#endif
    return fd;
}
3163 
/* do_socket() Must return target values and target errnos.
 *
 * Emulates socket(2): converts the target type/flag bits, restricts
 * netlink to the protocols this emulation can translate, creates the host
 * socket, and registers per-fd data translators where needed.
 */
static abi_long do_socket(int domain, int type, int protocol)
{
    int target_type = type;
    int ret;

    /* Convert TARGET_SOCK_* type and flag bits to the host encoding. */
    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* Only netlink protocols with a registered translator (or none
     * needed) are supported; reject the rest up front. */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    /* NOTE(review): packet-socket protocol numbers are byte-swapped with
     * tswap16 here; presumably because they are passed in network byte
     * order on Linux - confirm for cross-endian targets. */
    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        /* Emulate NONBLOCK on hosts lacking SOCK_NONBLOCK; may close the
         * fd and return an error. */
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            /* Attach the matching netlink message translator to the fd. */
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* Unreachable: protocol was validated above. */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
3217 
3218 /* do_bind() Must return target values and target errnos. */
3219 static abi_long do_bind(int sockfd, abi_ulong target_addr,
3220                         socklen_t addrlen)
3221 {
3222     void *addr;
3223     abi_long ret;
3224 
3225     if ((int)addrlen < 0) {
3226         return -TARGET_EINVAL;
3227     }
3228 
3229     addr = alloca(addrlen+1);
3230 
3231     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3232     if (ret)
3233         return ret;
3234 
3235     return get_errno(bind(sockfd, addr, addrlen));
3236 }
3237 
3238 /* do_connect() Must return target values and target errnos. */
3239 static abi_long do_connect(int sockfd, abi_ulong target_addr,
3240                            socklen_t addrlen)
3241 {
3242     void *addr;
3243     abi_long ret;
3244 
3245     if ((int)addrlen < 0) {
3246         return -TARGET_EINVAL;
3247     }
3248 
3249     addr = alloca(addrlen+1);
3250 
3251     ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
3252     if (ret)
3253         return ret;
3254 
3255     return get_errno(safe_connect(sockfd, addr, addrlen));
3256 }
3257 
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Core of sendmsg/recvmsg emulation.  'msgp' is an already-locked guest
 * msghdr; 'send' selects sendmsg (non-zero) vs recvmsg.  Converts name,
 * iovec and control data between target and host layouts, invokes the
 * syscall, and (for recvmsg) converts the results back.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* NOTE(review): control buffer is sized at twice the target length -
     * presumably headroom for cmsgs growing when converted to host
     * layout; confirm against target_to_host_cmsg(). */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);

    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);

    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }

    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        /* lock_iovec() returns NULL with errno == 0 for a zero count. */
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            void *host_msg;

            /* NOTE(review): only the first iovec entry is translated and
             * sent when the fd has a data translator. */
            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            /* Preserve the byte count; 'ret' is reused for error checks
             * during the conversions below. */
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                               MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* (void *)-1 marks the deliberately-bad name set above. */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                    msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }

                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
3370 
3371 static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
3372                                int flags, int send)
3373 {
3374     abi_long ret;
3375     struct target_msghdr *msgp;
3376 
3377     if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
3378                           msgp,
3379                           target_msg,
3380                           send ? 1 : 0)) {
3381         return -TARGET_EFAULT;
3382     }
3383     ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
3384     unlock_user_struct(msgp, target_msg, send ? 0 : 1);
3385     return ret;
3386 }
3387 
3388 /* We don't rely on the C library to have sendmmsg/recvmmsg support,
3389  * so it might not have this *mmsg-specific flag either.
3390  */
3391 #ifndef MSG_WAITFORONE
3392 #define MSG_WAITFORONE 0x10000
3393 #endif
3394 
3395 static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
3396                                 unsigned int vlen, unsigned int flags,
3397                                 int send)
3398 {
3399     struct target_mmsghdr *mmsgp;
3400     abi_long ret = 0;
3401     int i;
3402 
3403     if (vlen > UIO_MAXIOV) {
3404         vlen = UIO_MAXIOV;
3405     }
3406 
3407     mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
3408     if (!mmsgp) {
3409         return -TARGET_EFAULT;
3410     }
3411 
3412     for (i = 0; i < vlen; i++) {
3413         ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
3414         if (is_error(ret)) {
3415             break;
3416         }
3417         mmsgp[i].msg_len = tswap32(ret);
3418         /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
3419         if (flags & MSG_WAITFORONE) {
3420             flags |= MSG_DONTWAIT;
3421         }
3422     }
3423 
3424     unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
3425 
3426     /* Return number of datagrams sent if we sent any at all;
3427      * otherwise return the error.
3428      */
3429     if (i) {
3430         return i;
3431     }
3432     return ret;
3433 }
3434 
/* do_accept4() Must return target values and target errnos.
 *
 * Emulates accept4(2): validates the target flag bits, maps them to host
 * SOCK_* flags, and converts the returned peer address to target layout.
 * A NULL target_addr means the caller does not want the peer address.
 */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* Only CLOEXEC and NONBLOCK are valid accept4() flags. */
    if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
        return -TARGET_EINVAL;
    }

    host_flags = 0;
    if (flags & TARGET_SOCK_NONBLOCK) {
        host_flags |= SOCK_NONBLOCK;
    }
    if (flags & TARGET_SOCK_CLOEXEC) {
        host_flags |= SOCK_CLOEXEC;
    }

    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }

    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    /* Check writability up front so we fail with EFAULT before blocking
     * in accept. */
    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }

    addr = alloca(addrlen);

    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* Copy out at most what the guest buffer can hold, but report
         * the full address length like the kernel does. */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
3484 
3485 /* do_getpeername() Must return target values and target errnos. */
3486 static abi_long do_getpeername(int fd, abi_ulong target_addr,
3487                                abi_ulong target_addrlen_addr)
3488 {
3489     socklen_t addrlen, ret_addrlen;
3490     void *addr;
3491     abi_long ret;
3492 
3493     if (get_user_u32(addrlen, target_addrlen_addr))
3494         return -TARGET_EFAULT;
3495 
3496     if ((int)addrlen < 0) {
3497         return -TARGET_EINVAL;
3498     }
3499 
3500     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3501         return -TARGET_EFAULT;
3502     }
3503 
3504     addr = alloca(addrlen);
3505 
3506     ret_addrlen = addrlen;
3507     ret = get_errno(getpeername(fd, addr, &ret_addrlen));
3508     if (!is_error(ret)) {
3509         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3510         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3511             ret = -TARGET_EFAULT;
3512         }
3513     }
3514     return ret;
3515 }
3516 
3517 /* do_getsockname() Must return target values and target errnos. */
3518 static abi_long do_getsockname(int fd, abi_ulong target_addr,
3519                                abi_ulong target_addrlen_addr)
3520 {
3521     socklen_t addrlen, ret_addrlen;
3522     void *addr;
3523     abi_long ret;
3524 
3525     if (get_user_u32(addrlen, target_addrlen_addr))
3526         return -TARGET_EFAULT;
3527 
3528     if ((int)addrlen < 0) {
3529         return -TARGET_EINVAL;
3530     }
3531 
3532     if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
3533         return -TARGET_EFAULT;
3534     }
3535 
3536     addr = alloca(addrlen);
3537 
3538     ret_addrlen = addrlen;
3539     ret = get_errno(getsockname(fd, addr, &ret_addrlen));
3540     if (!is_error(ret)) {
3541         host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
3542         if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
3543             ret = -TARGET_EFAULT;
3544         }
3545     }
3546     return ret;
3547 }
3548 
3549 /* do_socketpair() Must return target values and target errnos. */
3550 static abi_long do_socketpair(int domain, int type, int protocol,
3551                               abi_ulong target_tab_addr)
3552 {
3553     int tab[2];
3554     abi_long ret;
3555 
3556     target_to_host_sock_type(&type);
3557 
3558     ret = get_errno(socketpair(domain, type, protocol, tab));
3559     if (!is_error(ret)) {
3560         if (put_user_s32(tab[0], target_tab_addr)
3561             || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
3562             ret = -TARGET_EFAULT;
3563     }
3564     return ret;
3565 }
3566 
/*
 * do_sendto() Must return target values and target errnos.
 * Implements both sendto() (target_addr != 0) and send() (target_addr == 0).
 */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    void *copy_msg = NULL; /* original locked guest buffer when translating */
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }

    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /*
         * This fd has a registered data translator: run it on a private
         * copy so the guest's buffer is left untouched.
         */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* NOTE(review): the +1 suggests target_to_host_sockaddr may write
         * one byte past addrlen — confirm against its implementation. */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* Drop the translated copy; restore the locked guest pointer so
         * unlock_user() below releases the right buffer. */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
3610 
/*
 * do_recvfrom() Must return target values and target errnos.
 * Implements both recvfrom() (target_addr != 0) and recv() (target_addr == 0).
 */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    if (!msg) {
        /* Pass a NULL buffer straight through to the host syscall. */
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            abi_long trans;
            /* Translate in place; at most ret bytes were received. */
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* Never copy more than the guest-supplied buffer can hold. */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* Error path: release the guest buffer without copying back. */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
3671 
#ifdef TARGET_NR_socketcall
/*
 * do_socketcall() must return target values and target errnos.
 * Demultiplexes the single socketcall(2) entry point used by some
 * architectures into the individual socket helpers above.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    /* Argument count per operation: how many abi_longs to read from vptr. */
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
3764 
#define N_SHM_REGIONS	32

/* Presumably tracks guest shared-memory attachments so the size of a
 * mapping can be recovered later — TODO confirm against the shmat/shmdt
 * emulation that uses this table. */
static struct shm_region {
    abi_ulong start;  /* guest start address of the region */
    abi_ulong size;   /* region size in bytes */
    bool in_use;      /* whether this slot holds a live entry */
} shm_regions[N_SHM_REGIONS];
3772 
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct */
struct target_semid64_ds
{
  struct target_ipc_perm sem_perm;  /* ownership and permissions */
  abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused1;              /* pads the time field to 64 bits */
#endif
  abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
  abi_ulong __unused2;              /* pads the time field to 64 bits */
#endif
  abi_ulong sem_nsems;              /* number of semaphores in the set */
  abi_ulong __unused3;
  abi_ulong __unused4;
};
#endif
3791 
/*
 * Convert the guest struct ipc_perm embedded in a semid64_ds at
 * @target_addr into host *host_ip.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
3819 
/*
 * Convert host *host_ip into the guest struct ipc_perm embedded in a
 * semid64_ds at @target_addr (mirror of target_to_host_ipc_perm).
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 32-bit on some targets, 16-bit elsewhere. */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
3847 
3848 static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
3849                                                abi_ulong target_addr)
3850 {
3851     struct target_semid64_ds *target_sd;
3852 
3853     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
3854         return -TARGET_EFAULT;
3855     if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
3856         return -TARGET_EFAULT;
3857     host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
3858     host_sd->sem_otime = tswapal(target_sd->sem_otime);
3859     host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
3860     unlock_user_struct(target_sd, target_addr, 0);
3861     return 0;
3862 }
3863 
3864 static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
3865                                                struct semid_ds *host_sd)
3866 {
3867     struct target_semid64_ds *target_sd;
3868 
3869     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
3870         return -TARGET_EFAULT;
3871     if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
3872         return -TARGET_EFAULT;
3873     target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
3874     target_sd->sem_otime = tswapal(host_sd->sem_otime);
3875     target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
3876     unlock_user_struct(target_sd, target_addr, 1);
3877     return 0;
3878 }
3879 
/* Guest layout of struct seminfo as returned by IPC_INFO / SEM_INFO. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
3892 
/*
 * Copy a host struct seminfo into guest memory at @target_addr,
 * byteswapping each field as needed.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
                                              struct seminfo *host_seminfo)
{
    struct target_seminfo *target_seminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_seminfo->semmap, &target_seminfo->semmap);
    __put_user(host_seminfo->semmni, &target_seminfo->semmni);
    __put_user(host_seminfo->semmns, &target_seminfo->semmns);
    __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
    __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
    __put_user(host_seminfo->semopm, &target_seminfo->semopm);
    __put_user(host_seminfo->semume, &target_seminfo->semume);
    __put_user(host_seminfo->semusz, &target_seminfo->semusz);
    __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
    __put_user(host_seminfo->semaem, &target_seminfo->semaem);
    unlock_user_struct(target_seminfo, target_addr, 1);
    return 0;
}
3912 
/* Host-side semctl() argument union; see semctl(2). */
union semun {
    int val;                /* value for SETVAL */
    struct semid_ds *buf;   /* buffer for IPC_STAT / IPC_SET */
    unsigned short *array;  /* array for GETALL / SETALL */
    struct seminfo *__buf;  /* buffer for IPC_INFO / SEM_INFO */
};
3919 
3920 union target_semun {
3921 	int val;
3922 	abi_ulong buf;
3923 	abi_ulong array;
3924 	abi_ulong __buf;
3925 };
3926 
3927 static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
3928                                                abi_ulong target_addr)
3929 {
3930     int nsems;
3931     unsigned short *array;
3932     union semun semun;
3933     struct semid_ds semid_ds;
3934     int i, ret;
3935 
3936     semun.buf = &semid_ds;
3937 
3938     ret = semctl(semid, 0, IPC_STAT, semun);
3939     if (ret == -1)
3940         return get_errno(ret);
3941 
3942     nsems = semid_ds.sem_nsems;
3943 
3944     *host_array = g_try_new(unsigned short, nsems);
3945     if (!*host_array) {
3946         return -TARGET_ENOMEM;
3947     }
3948     array = lock_user(VERIFY_READ, target_addr,
3949                       nsems*sizeof(unsigned short), 1);
3950     if (!array) {
3951         g_free(*host_array);
3952         return -TARGET_EFAULT;
3953     }
3954 
3955     for(i=0; i<nsems; i++) {
3956         __get_user((*host_array)[i], &array[i]);
3957     }
3958     unlock_user(array, target_addr, 0);
3959 
3960     return 0;
3961 }
3962 
3963 static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
3964                                                unsigned short **host_array)
3965 {
3966     int nsems;
3967     unsigned short *array;
3968     union semun semun;
3969     struct semid_ds semid_ds;
3970     int i, ret;
3971 
3972     semun.buf = &semid_ds;
3973 
3974     ret = semctl(semid, 0, IPC_STAT, semun);
3975     if (ret == -1)
3976         return get_errno(ret);
3977 
3978     nsems = semid_ds.sem_nsems;
3979 
3980     array = lock_user(VERIFY_WRITE, target_addr,
3981                       nsems*sizeof(unsigned short), 0);
3982     if (!array)
3983         return -TARGET_EFAULT;
3984 
3985     for(i=0; i<nsems; i++) {
3986         __put_user((*host_array)[i], &array[i]);
3987     }
3988     g_free(*host_array);
3989     unlock_user(array, target_addr, 1);
3990 
3991     return 0;
3992 }
3993 
3994 static inline abi_long do_semctl(int semid, int semnum, int cmd,
3995                                  abi_ulong target_arg)
3996 {
3997     union target_semun target_su = { .buf = target_arg };
3998     union semun arg;
3999     struct semid_ds dsarg;
4000     unsigned short *array = NULL;
4001     struct seminfo seminfo;
4002     abi_long ret = -TARGET_EINVAL;
4003     abi_long err;
4004     cmd &= 0xff;
4005 
4006     switch( cmd ) {
4007 	case GETVAL:
4008 	case SETVAL:
4009             /* In 64 bit cross-endian situations, we will erroneously pick up
4010              * the wrong half of the union for the "val" element.  To rectify
4011              * this, the entire 8-byte structure is byteswapped, followed by
4012 	     * a swap of the 4 byte val field. In other cases, the data is
4013 	     * already in proper host byte order. */
4014 	    if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
4015 		target_su.buf = tswapal(target_su.buf);
4016 		arg.val = tswap32(target_su.val);
4017 	    } else {
4018 		arg.val = target_su.val;
4019 	    }
4020             ret = get_errno(semctl(semid, semnum, cmd, arg));
4021             break;
4022 	case GETALL:
4023 	case SETALL:
4024             err = target_to_host_semarray(semid, &array, target_su.array);
4025             if (err)
4026                 return err;
4027             arg.array = array;
4028             ret = get_errno(semctl(semid, semnum, cmd, arg));
4029             err = host_to_target_semarray(semid, target_su.array, &array);
4030             if (err)
4031                 return err;
4032             break;
4033 	case IPC_STAT:
4034 	case IPC_SET:
4035 	case SEM_STAT:
4036             err = target_to_host_semid_ds(&dsarg, target_su.buf);
4037             if (err)
4038                 return err;
4039             arg.buf = &dsarg;
4040             ret = get_errno(semctl(semid, semnum, cmd, arg));
4041             err = host_to_target_semid_ds(target_su.buf, &dsarg);
4042             if (err)
4043                 return err;
4044             break;
4045 	case IPC_INFO:
4046 	case SEM_INFO:
4047             arg.__buf = &seminfo;
4048             ret = get_errno(semctl(semid, semnum, cmd, arg));
4049             err = host_to_target_seminfo(target_su.__buf, &seminfo);
4050             if (err)
4051                 return err;
4052             break;
4053 	case IPC_RMID:
4054 	case GETPID:
4055 	case GETNCNT:
4056 	case GETZCNT:
4057             ret = get_errno(semctl(semid, semnum, cmd, NULL));
4058             break;
4059     }
4060 
4061     return ret;
4062 }
4063 
/* Guest layout of struct sembuf (one semop() operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: add, subtract, or wait-for-zero */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
4069 
4070 static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
4071                                              abi_ulong target_addr,
4072                                              unsigned nsops)
4073 {
4074     struct target_sembuf *target_sembuf;
4075     int i;
4076 
4077     target_sembuf = lock_user(VERIFY_READ, target_addr,
4078                               nsops*sizeof(struct target_sembuf), 1);
4079     if (!target_sembuf)
4080         return -TARGET_EFAULT;
4081 
4082     for(i=0; i<nsops; i++) {
4083         __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
4084         __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
4085         __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
4086     }
4087 
4088     unlock_user(target_sembuf, target_addr, 0);
4089 
4090     return 0;
4091 }
4092 
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop()/semtimedop(). A zero @timeout means no timeout is
 * passed to the host; @time64 selects the 64-bit timespec layout for
 * the guest timeout pointer. Must return target values/errnos.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* Enforce the per-call operation limit before allocating. */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* Fall back to sys_ipc on hosts without a direct semtimedop syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops, (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
4155 
/* Guest layout of struct msqid_ds (message queue status). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* pads the time field to 64 bits */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;              /* pads the time field to 64 bits */
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;              /* pads the time field to 64 bits */
#endif
    abi_ulong __msg_cbytes;           /* current number of bytes in queue */
    abi_ulong msg_qnum;               /* current number of messages */
    abi_ulong msg_qbytes;             /* maximum bytes allowed in queue */
    abi_ulong msg_lspid;              /* PID of last msgsnd */
    abi_ulong msg_lrpid;              /* PID of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
4179 
4180 static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
4181                                                abi_ulong target_addr)
4182 {
4183     struct target_msqid_ds *target_md;
4184 
4185     if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
4186         return -TARGET_EFAULT;
4187     if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
4188         return -TARGET_EFAULT;
4189     host_md->msg_stime = tswapal(target_md->msg_stime);
4190     host_md->msg_rtime = tswapal(target_md->msg_rtime);
4191     host_md->msg_ctime = tswapal(target_md->msg_ctime);
4192     host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
4193     host_md->msg_qnum = tswapal(target_md->msg_qnum);
4194     host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
4195     host_md->msg_lspid = tswapal(target_md->msg_lspid);
4196     host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
4197     unlock_user_struct(target_md, target_addr, 0);
4198     return 0;
4199 }
4200 
4201 static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
4202                                                struct msqid_ds *host_md)
4203 {
4204     struct target_msqid_ds *target_md;
4205 
4206     if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
4207         return -TARGET_EFAULT;
4208     if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
4209         return -TARGET_EFAULT;
4210     target_md->msg_stime = tswapal(host_md->msg_stime);
4211     target_md->msg_rtime = tswapal(host_md->msg_rtime);
4212     target_md->msg_ctime = tswapal(host_md->msg_ctime);
4213     target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
4214     target_md->msg_qnum = tswapal(host_md->msg_qnum);
4215     target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
4216     target_md->msg_lspid = tswapal(host_md->msg_lspid);
4217     target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
4218     unlock_user_struct(target_md, target_addr, 1);
4219     return 0;
4220 }
4221 
/* Guest layout of struct msginfo as returned by IPC_INFO / MSG_INFO. */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
4232 
/*
 * Copy a host struct msginfo into guest memory at @target_addr,
 * byteswapping each field as needed.
 * Returns 0 on success, -TARGET_EFAULT if guest memory is inaccessible.
 */
static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
                                              struct msginfo *host_msginfo)
{
    struct target_msginfo *target_msginfo;
    if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
    __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
    __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
    __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
    __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
    __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
    __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
    __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
    unlock_user_struct(target_msginfo, target_addr, 1);
    return 0;
}
4250 
/*
 * Emulate msgctl(2), converting the msqid_ds/msginfo buffers between
 * guest and host layouts as @cmd requires.
 * Must return target values and target errnos.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    /* Mask off version bits (e.g. IPC_64) before dispatching. */
    cmd &= 0xff;

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* The kernel writes a struct msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }

    return ret;
}
4282 
/* Guest layout of struct msgbuf; mtext is really variable-length. */
struct target_msgbuf {
    abi_long mtype;     /* message type, must be > 0 */
    char	mtext[1];   /* start of the message payload */
};
4287 
/*
 * Emulate msgsnd(2): copy the guest message into a host msgbuf and
 * send it. Must return target values and target errnos.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: a long mtype header followed by msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    /* Fall back to sys_ipc on hosts without a direct msgsnd syscall. */
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
4328 
#ifdef __NR_ipc
/*
 * Build the trailing sys_ipc arguments for IPCOP_msgrcv; the layout
 * differs per host architecture.
 */
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters.  */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
/* Generic: pass {msgp, msgtyp} indirectly through a two-element array. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif
4342 
/*
 * Emulate msgrcv(2): receive into a host msgbuf, then copy the payload
 * and mtype back into the guest buffer.
 * Must return target values and target errnos.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    /* Host buffer: a long mtype header followed by msgsz payload bytes. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    /* Fall back to sys_ipc on hosts without a direct msgrcv syscall. */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret is the number of payload bytes received; mtext starts
         * right after the abi_long mtype field in the guest struct. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
4394 
4395 static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
4396                                                abi_ulong target_addr)
4397 {
4398     struct target_shmid_ds *target_sd;
4399 
4400     if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
4401         return -TARGET_EFAULT;
4402     if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
4403         return -TARGET_EFAULT;
4404     __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4405     __get_user(host_sd->shm_atime, &target_sd->shm_atime);
4406     __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4407     __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4408     __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4409     __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4410     __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4411     unlock_user_struct(target_sd, target_addr, 0);
4412     return 0;
4413 }
4414 
4415 static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
4416                                                struct shmid_ds *host_sd)
4417 {
4418     struct target_shmid_ds *target_sd;
4419 
4420     if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
4421         return -TARGET_EFAULT;
4422     if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
4423         return -TARGET_EFAULT;
4424     __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
4425     __put_user(host_sd->shm_atime, &target_sd->shm_atime);
4426     __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
4427     __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
4428     __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
4429     __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
4430     __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
4431     unlock_user_struct(target_sd, target_addr, 1);
4432     return 0;
4433 }
4434 
/* Guest-ABI layout of struct shminfo, as filled in by shmctl(IPC_INFO). */
struct  target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
4442 
/*
 * Copy a host struct shminfo out to the guest at @target_addr,
 * byte-swapping each field.  Returns 0 on success, -TARGET_EFAULT on
 * inaccessible guest memory.
 */
static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
                                              struct shminfo *host_shminfo)
{
    struct target_shminfo *target_shminfo;
    if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
    __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
    __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
    __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
    __put_user(host_shminfo->shmall, &target_shminfo->shmall);
    unlock_user_struct(target_shminfo, target_addr, 1);
    return 0;
}
4457 
/* Guest-ABI layout of struct shm_info, as filled in by shmctl(SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
4466 
/*
 * Copy a host struct shm_info out to the guest at @target_addr,
 * byte-swapping each field.  Returns 0 on success, -TARGET_EFAULT on
 * inaccessible guest memory.
 */
static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
                                               struct shm_info *host_shm_info)
{
    struct target_shm_info *target_shm_info;
    if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
        return -TARGET_EFAULT;
    __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
    __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
    __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
    __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
    __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
    __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
    unlock_user_struct(target_shm_info, target_addr, 1);
    return 0;
}
4482 
4483 static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
4484 {
4485     struct shmid_ds dsarg;
4486     struct shminfo shminfo;
4487     struct shm_info shm_info;
4488     abi_long ret = -TARGET_EINVAL;
4489 
4490     cmd &= 0xff;
4491 
4492     switch(cmd) {
4493     case IPC_STAT:
4494     case IPC_SET:
4495     case SHM_STAT:
4496         if (target_to_host_shmid_ds(&dsarg, buf))
4497             return -TARGET_EFAULT;
4498         ret = get_errno(shmctl(shmid, cmd, &dsarg));
4499         if (host_to_target_shmid_ds(buf, &dsarg))
4500             return -TARGET_EFAULT;
4501         break;
4502     case IPC_INFO:
4503         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
4504         if (host_to_target_shminfo(buf, &shminfo))
4505             return -TARGET_EFAULT;
4506         break;
4507     case SHM_INFO:
4508         ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
4509         if (host_to_target_shm_info(buf, &shm_info))
4510             return -TARGET_EFAULT;
4511         break;
4512     case IPC_RMID:
4513     case SHM_LOCK:
4514     case SHM_UNLOCK:
4515         ret = get_errno(shmctl(shmid, cmd, NULL));
4516         break;
4517     }
4518 
4519     return ret;
4520 }
4521 
4522 #ifndef TARGET_FORCE_SHMLBA
4523 /* For most architectures, SHMLBA is the same as the page size;
4524  * some architectures have larger values, in which case they should
4525  * define TARGET_FORCE_SHMLBA and provide a target_shmlba() function.
4526  * This corresponds to the kernel arch code defining __ARCH_FORCE_SHMLBA
4527  * and defining its own value for SHMLBA.
4528  *
4529  * The kernel also permits SHMLBA to be set by the architecture to a
4530  * value larger than the page size without setting __ARCH_FORCE_SHMLBA;
4531  * this means that addresses are rounded to the large size if
4532  * SHM_RND is set but addresses not aligned to that size are not rejected
4533  * as long as they are at least page-aligned. Since the only architecture
4534  * which uses this is ia64 this code doesn't provide for that oddity.
4535  */
/* Default SHMLBA for targets without TARGET_FORCE_SHMLBA: one page. */
static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
{
    return TARGET_PAGE_SIZE;
}
4540 #endif
4541 
4542 static inline abi_ulong do_shmat(CPUArchState *cpu_env,
4543                                  int shmid, abi_ulong shmaddr, int shmflg)
4544 {
4545     CPUState *cpu = env_cpu(cpu_env);
4546     abi_long raddr;
4547     void *host_raddr;
4548     struct shmid_ds shm_info;
4549     int i,ret;
4550     abi_ulong shmlba;
4551 
4552     /* shmat pointers are always untagged */
4553 
4554     /* find out the length of the shared memory segment */
4555     ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
4556     if (is_error(ret)) {
4557         /* can't get length, bail out */
4558         return ret;
4559     }
4560 
4561     shmlba = target_shmlba(cpu_env);
4562 
4563     if (shmaddr & (shmlba - 1)) {
4564         if (shmflg & SHM_RND) {
4565             shmaddr &= ~(shmlba - 1);
4566         } else {
4567             return -TARGET_EINVAL;
4568         }
4569     }
4570     if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
4571         return -TARGET_EINVAL;
4572     }
4573 
4574     mmap_lock();
4575 
4576     /*
4577      * We're mapping shared memory, so ensure we generate code for parallel
4578      * execution and flush old translations.  This will work up to the level
4579      * supported by the host -- anything that requires EXCP_ATOMIC will not
4580      * be atomic with respect to an external process.
4581      */
4582     if (!(cpu->tcg_cflags & CF_PARALLEL)) {
4583         cpu->tcg_cflags |= CF_PARALLEL;
4584         tb_flush(cpu);
4585     }
4586 
4587     if (shmaddr)
4588         host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
4589     else {
4590         abi_ulong mmap_start;
4591 
4592         /* In order to use the host shmat, we need to honor host SHMLBA.  */
4593         mmap_start = mmap_find_vma(0, shm_info.shm_segsz, MAX(SHMLBA, shmlba));
4594 
4595         if (mmap_start == -1) {
4596             errno = ENOMEM;
4597             host_raddr = (void *)-1;
4598         } else
4599             host_raddr = shmat(shmid, g2h_untagged(mmap_start),
4600                                shmflg | SHM_REMAP);
4601     }
4602 
4603     if (host_raddr == (void *)-1) {
4604         mmap_unlock();
4605         return get_errno((long)host_raddr);
4606     }
4607     raddr=h2g((unsigned long)host_raddr);
4608 
4609     page_set_flags(raddr, raddr + shm_info.shm_segsz - 1,
4610                    PAGE_VALID | PAGE_RESET | PAGE_READ |
4611                    (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
4612 
4613     for (i = 0; i < N_SHM_REGIONS; i++) {
4614         if (!shm_regions[i].in_use) {
4615             shm_regions[i].in_use = true;
4616             shm_regions[i].start = raddr;
4617             shm_regions[i].size = shm_info.shm_segsz;
4618             break;
4619         }
4620     }
4621 
4622     mmap_unlock();
4623     return raddr;
4624 
4625 }
4626 
/*
 * Emulate shmdt(2): detach the segment attached at guest address
 * @shmaddr, clearing the page flags recorded by do_shmat().
 */
static inline abi_long do_shmdt(abi_ulong shmaddr)
{
    int i;
    abi_long rv;

    /* shmdt pointers are always untagged */

    mmap_lock();

    /* Find the region recorded by do_shmat() and drop its page flags. */
    for (i = 0; i < N_SHM_REGIONS; ++i) {
        if (shm_regions[i].in_use && shm_regions[i].start == shmaddr) {
            shm_regions[i].in_use = false;
            page_set_flags(shmaddr, shmaddr + shm_regions[i].size - 1, 0);
            break;
        }
    }
    rv = get_errno(shmdt(g2h_untagged(shmaddr)));

    mmap_unlock();

    return rv;
}
4649 
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings.  */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy sys_ipc syscall into the individual SysV IPC
 * operations.  The high 16 bits of @call carry the "version" used by
 * some operations (msgrcv, shmat) to select an argument convention.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;

    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;

    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }

    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;

    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;

    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;

    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /*
                 * Old-style msgrcv: ptr points at a two-word struct
                 * holding the real msgp and msgtyp.
                 */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;

    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = do_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                /*
                 * NOTE(review): raddr already holds a target errno here,
                 * so get_errno() looks redundant -- confirm it passes
                 * negative values other than -1 through unchanged.
                 */
                return get_errno(raddr);
            /* Attach address is returned through *third, not as result. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
	break;
    case IPCOP_shmdt:
        ret = do_shmdt(ptr);
	break;

    case IPCOP_shmget:
	/* IPC_* flag values are the same on all linux platforms */
	ret = get_errno(shmget(first, second, third));
	break;

	/* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
	ret = -TARGET_ENOSYS;
	break;
    }
    return ret;
}
#endif
4770 
4771 /* kernel structure types definitions */
4772 
/*
 * X-macro expansion of syscall_types.h: the first pass generates a
 * STRUCT_<name> enum constant per struct, the second generates a
 * struct_<name>_def argtype descriptor array for each non-special one.
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = {  __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL
4787 
4788 #define MAX_STRUCT_SIZE 4096
4789 
4790 #ifdef CONFIG_FIEMAP
4791 /* So fiemap access checks don't overflow on 32 bit systems.
4792  * This is very slightly smaller than the limit imposed by
4793  * the underlying kernel.
4794  */
4795 #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap))  \
4796                             / sizeof(struct fiemap_extent))
4797 
/*
 * Handle FS_IOC_FIEMAP: a struct fiemap header followed by a
 * variable-length array of struct fiemap_extent, converted in both
 * directions between target and host layout.
 */
static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;
    /* Convert the fixed-size fiemap header from the guest. */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    /* Bound the extent count so outbufsz below cannot overflow. */
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }

    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
4877 #endif
4878 
4879 static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
4880                                 int fd, int cmd, abi_long arg)
4881 {
4882     const argtype *arg_type = ie->arg_type;
4883     int target_size;
4884     void *argptr;
4885     int ret;
4886     struct ifconf *host_ifconf;
4887     uint32_t outbufsz;
4888     const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
4889     const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
4890     int target_ifreq_size;
4891     int nb_ifreq;
4892     int free_buf = 0;
4893     int i;
4894     int target_ifc_len;
4895     abi_long target_ifc_buf;
4896     int host_ifc_len;
4897     char *host_ifc_buf;
4898 
4899     assert(arg_type[0] == TYPE_PTR);
4900     assert(ie->access == IOC_RW);
4901 
4902     arg_type++;
4903     target_size = thunk_type_size(arg_type, 0);
4904 
4905     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
4906     if (!argptr)
4907         return -TARGET_EFAULT;
4908     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
4909     unlock_user(argptr, arg, 0);
4910 
4911     host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
4912     target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
4913     target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
4914 
4915     if (target_ifc_buf != 0) {
4916         target_ifc_len = host_ifconf->ifc_len;
4917         nb_ifreq = target_ifc_len / target_ifreq_size;
4918         host_ifc_len = nb_ifreq * sizeof(struct ifreq);
4919 
4920         outbufsz = sizeof(*host_ifconf) + host_ifc_len;
4921         if (outbufsz > MAX_STRUCT_SIZE) {
4922             /*
4923              * We can't fit all the extents into the fixed size buffer.
4924              * Allocate one that is large enough and use it instead.
4925              */
4926             host_ifconf = g_try_malloc(outbufsz);
4927             if (!host_ifconf) {
4928                 return -TARGET_ENOMEM;
4929             }
4930             memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
4931             free_buf = 1;
4932         }
4933         host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
4934 
4935         host_ifconf->ifc_len = host_ifc_len;
4936     } else {
4937       host_ifc_buf = NULL;
4938     }
4939     host_ifconf->ifc_buf = host_ifc_buf;
4940 
4941     ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
4942     if (!is_error(ret)) {
4943 	/* convert host ifc_len to target ifc_len */
4944 
4945         nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
4946         target_ifc_len = nb_ifreq * target_ifreq_size;
4947         host_ifconf->ifc_len = target_ifc_len;
4948 
4949 	/* restore target ifc_buf */
4950 
4951         host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
4952 
4953 	/* copy struct ifconf to target user */
4954 
4955         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
4956         if (!argptr)
4957             return -TARGET_EFAULT;
4958         thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
4959         unlock_user(argptr, arg, target_size);
4960 
4961         if (target_ifc_buf != 0) {
4962             /* copy ifreq[] to target user */
4963             argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
4964             for (i = 0; i < nb_ifreq ; i++) {
4965                 thunk_convert(argptr + i * target_ifreq_size,
4966                               host_ifc_buf + i * sizeof(struct ifreq),
4967                               ifreq_arg_type, THUNK_TARGET);
4968             }
4969             unlock_user(argptr, target_ifc_buf, target_ifc_len);
4970         }
4971     }
4972 
4973     if (free_buf) {
4974         g_free(host_ifconf);
4975     }
4976 
4977     return ret;
4978 }
4979 
4980 #if defined(CONFIG_USBFS)
4981 #if HOST_LONG_BITS > 64
4982 #error USBDEVFS thunks do not support >64 bit hosts yet.
4983 #endif
/*
 * Bookkeeping for one in-flight USB URB: the guest's urb/buffer
 * addresses, the locked host pointer for the data buffer, and the
 * host-layout urb actually submitted to the kernel.
 */
struct live_urb {
    uint64_t target_urb_adr;
    uint64_t target_buf_adr;
    char *target_buf_ptr;
    struct usbdevfs_urb host_urb;
};
4990 
/*
 * Lazily-created hash table mapping in-flight URBs (keyed by the
 * 64-bit guest urb address, the first field of struct live_urb).
 * NOTE(review): the lazy init is not guarded by a lock -- presumably
 * callers are serialized elsewhere; confirm before relying on it.
 */
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}
5000 
/* Track @urb; the key is the leading target_urb_adr field of *urb. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}
5006 
/* Find the live_urb for guest urb address @target_urb_adr, or NULL. */
static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}
5012 
/* Stop tracking @urb (does not free it; the caller owns the memory). */
static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
5018 
/*
 * Handle USBDEVFS_REAPURB{,NDELAY}: reap a completed URB from the
 * kernel, copy its result and data buffer back to the guest, and
 * write the guest's original urb pointer through @arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    /* The kernel returns the host urb pointer we submitted. */
    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    /*
     * Recover the enclosing live_urb (container-of: host_urb is
     * embedded in struct live_urb, so subtract its offset).
     */
    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        /* NOTE(review): this path neither removes nor frees lurb -- confirm
         * whether a reaped urb with no guest address can occur and leak. */
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    /* Release the guest data buffer locked at submit time. */
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
5078 
/*
 * Handle USBDEVFS_DISCARDURB: cancel a submitted URB identified by its
 * guest address in @arg.  The live_urb stays tracked; reap still
 * performs the cleanup.
 */
static abi_long
do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
                             uint8_t *buf_temp __attribute__((unused)),
                             int fd, int cmd, abi_long arg)
{
    struct live_urb *lurb;

    /* map target address back to host URB with metadata. */
    lurb = urb_hashtable_lookup(arg);
    if (!lurb) {
        return -TARGET_EFAULT;
    }
    return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
}
5093 
/*
 * Handle USBDEVFS_SUBMITURB: convert the guest urb to host layout,
 * lock the guest data buffer for the transfer's lifetime, and submit.
 * The allocated live_urb is tracked until reap (or freed on failure).
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory.  hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Remember guest addresses so reap can write results back. */
    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        /* Submit failed: release the buffer lock without copy-back. */
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        /* Keep the urb tracked until it is reaped or discarded. */
        urb_hashtable_insert(lurb);
    }

    return ret;
}
5154 #endif /* CONFIG_USBFS */
5155 
5156 static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
5157                             int cmd, abi_long arg)
5158 {
5159     void *argptr;
5160     struct dm_ioctl *host_dm;
5161     abi_long guest_data;
5162     uint32_t guest_data_size;
5163     int target_size;
5164     const argtype *arg_type = ie->arg_type;
5165     abi_long ret;
5166     void *big_buf = NULL;
5167     char *host_data;
5168 
5169     arg_type++;
5170     target_size = thunk_type_size(arg_type, 0);
5171     argptr = lock_user(VERIFY_READ, arg, target_size, 1);
5172     if (!argptr) {
5173         ret = -TARGET_EFAULT;
5174         goto out;
5175     }
5176     thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
5177     unlock_user(argptr, arg, 0);
5178 
5179     /* buf_temp is too small, so fetch things into a bigger buffer */
5180     big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
5181     memcpy(big_buf, buf_temp, target_size);
5182     buf_temp = big_buf;
5183     host_dm = big_buf;
5184 
5185     guest_data = arg + host_dm->data_start;
5186     if ((guest_data - arg) < 0) {
5187         ret = -TARGET_EINVAL;
5188         goto out;
5189     }
5190     guest_data_size = host_dm->data_size - host_dm->data_start;
5191     host_data = (char*)host_dm + host_dm->data_start;
5192 
5193     argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
5194     if (!argptr) {
5195         ret = -TARGET_EFAULT;
5196         goto out;
5197     }
5198 
5199     switch (ie->host_cmd) {
5200     case DM_REMOVE_ALL:
5201     case DM_LIST_DEVICES:
5202     case DM_DEV_CREATE:
5203     case DM_DEV_REMOVE:
5204     case DM_DEV_SUSPEND:
5205     case DM_DEV_STATUS:
5206     case DM_DEV_WAIT:
5207     case DM_TABLE_STATUS:
5208     case DM_TABLE_CLEAR:
5209     case DM_TABLE_DEPS:
5210     case DM_LIST_VERSIONS:
5211         /* no input data */
5212         break;
5213     case DM_DEV_RENAME:
5214     case DM_DEV_SET_GEOMETRY:
5215         /* data contains only strings */
5216         memcpy(host_data, argptr, guest_data_size);
5217         break;
5218     case DM_TARGET_MSG:
5219         memcpy(host_data, argptr, guest_data_size);
5220         *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
5221         break;
5222     case DM_TABLE_LOAD:
5223     {
5224         void *gspec = argptr;
5225         void *cur_data = host_data;
5226         const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5227         int spec_size = thunk_type_size(arg_type, 0);
5228         int i;
5229 
5230         for (i = 0; i < host_dm->target_count; i++) {
5231             struct dm_target_spec *spec = cur_data;
5232             uint32_t next;
5233             int slen;
5234 
5235             thunk_convert(spec, gspec, arg_type, THUNK_HOST);
5236             slen = strlen((char*)gspec + spec_size) + 1;
5237             next = spec->next;
5238             spec->next = sizeof(*spec) + slen;
5239             strcpy((char*)&spec[1], gspec + spec_size);
5240             gspec += next;
5241             cur_data += spec->next;
5242         }
5243         break;
5244     }
5245     default:
5246         ret = -TARGET_EINVAL;
5247         unlock_user(argptr, guest_data, 0);
5248         goto out;
5249     }
5250     unlock_user(argptr, guest_data, 0);
5251 
5252     ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
5253     if (!is_error(ret)) {
5254         guest_data = arg + host_dm->data_start;
5255         guest_data_size = host_dm->data_size - host_dm->data_start;
5256         argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
5257         switch (ie->host_cmd) {
5258         case DM_REMOVE_ALL:
5259         case DM_DEV_CREATE:
5260         case DM_DEV_REMOVE:
5261         case DM_DEV_RENAME:
5262         case DM_DEV_SUSPEND:
5263         case DM_DEV_STATUS:
5264         case DM_TABLE_LOAD:
5265         case DM_TABLE_CLEAR:
5266         case DM_TARGET_MSG:
5267         case DM_DEV_SET_GEOMETRY:
5268             /* no return data */
5269             break;
5270         case DM_LIST_DEVICES:
5271         {
5272             struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
5273             uint32_t remaining_data = guest_data_size;
5274             void *cur_data = argptr;
5275             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
5276             int nl_size = 12; /* can't use thunk_size due to alignment */
5277 
5278             while (1) {
5279                 uint32_t next = nl->next;
5280                 if (next) {
5281                     nl->next = nl_size + (strlen(nl->name) + 1);
5282                 }
5283                 if (remaining_data < nl->next) {
5284                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5285                     break;
5286                 }
5287                 thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
5288                 strcpy(cur_data + nl_size, nl->name);
5289                 cur_data += nl->next;
5290                 remaining_data -= nl->next;
5291                 if (!next) {
5292                     break;
5293                 }
5294                 nl = (void*)nl + next;
5295             }
5296             break;
5297         }
5298         case DM_DEV_WAIT:
5299         case DM_TABLE_STATUS:
5300         {
5301             struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
5302             void *cur_data = argptr;
5303             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
5304             int spec_size = thunk_type_size(arg_type, 0);
5305             int i;
5306 
5307             for (i = 0; i < host_dm->target_count; i++) {
5308                 uint32_t next = spec->next;
5309                 int slen = strlen((char*)&spec[1]) + 1;
5310                 spec->next = (cur_data - argptr) + spec_size + slen;
5311                 if (guest_data_size < spec->next) {
5312                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5313                     break;
5314                 }
5315                 thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
5316                 strcpy(cur_data + spec_size, (char*)&spec[1]);
5317                 cur_data = argptr + spec->next;
5318                 spec = (void*)host_dm + host_dm->data_start + next;
5319             }
5320             break;
5321         }
5322         case DM_TABLE_DEPS:
5323         {
5324             void *hdata = (void*)host_dm + host_dm->data_start;
5325             int count = *(uint32_t*)hdata;
5326             uint64_t *hdev = hdata + 8;
5327             uint64_t *gdev = argptr + 8;
5328             int i;
5329 
5330             *(uint32_t*)argptr = tswap32(count);
5331             for (i = 0; i < count; i++) {
5332                 *gdev = tswap64(*hdev);
5333                 gdev++;
5334                 hdev++;
5335             }
5336             break;
5337         }
5338         case DM_LIST_VERSIONS:
5339         {
5340             struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
5341             uint32_t remaining_data = guest_data_size;
5342             void *cur_data = argptr;
5343             const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
5344             int vers_size = thunk_type_size(arg_type, 0);
5345 
5346             while (1) {
5347                 uint32_t next = vers->next;
5348                 if (next) {
5349                     vers->next = vers_size + (strlen(vers->name) + 1);
5350                 }
5351                 if (remaining_data < vers->next) {
5352                     host_dm->flags |= DM_BUFFER_FULL_FLAG;
5353                     break;
5354                 }
5355                 thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
5356                 strcpy(cur_data + vers_size, vers->name);
5357                 cur_data += vers->next;
5358                 remaining_data -= vers->next;
5359                 if (!next) {
5360                     break;
5361                 }
5362                 vers = (void*)vers + next;
5363             }
5364             break;
5365         }
5366         default:
5367             unlock_user(argptr, guest_data, 0);
5368             ret = -TARGET_EINVAL;
5369             goto out;
5370         }
5371         unlock_user(argptr, guest_data, guest_data_size);
5372 
5373         argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
5374         if (!argptr) {
5375             ret = -TARGET_EFAULT;
5376             goto out;
5377         }
5378         thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
5379         unlock_user(argptr, arg, target_size);
5380     }
5381 out:
5382     g_free(big_buf);
5383     return ret;
5384 }
5385 
/*
 * Handle the BLKPG ioctl.  struct blkpg_ioctl_arg embeds a guest pointer
 * to a struct blkpg_partition payload, so the generic thunk machinery
 * cannot convert it in one pass: fetch and convert the outer struct,
 * validate the opcode, then fetch and convert the payload it points to,
 * and finally redirect the data pointer at a host-side copy.
 * Returns the host ioctl result, -TARGET_EFAULT on a bad guest pointer,
 * or -TARGET_EINVAL for an unsupported opcode.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;

    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    /* skip the TYPE_PTR marker; the struct description follows it */
    arg_type++;
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data */
    /* host_blkpg->data still holds the (converted) guest address here */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
5438 
/*
 * Handle routing-table ioctls (SIOCADDRT/SIOCDELRT).  struct rtentry
 * contains an embedded rt_dev string pointer that the table-driven thunk
 * conversion cannot handle, so the struct is converted field by field;
 * when the rt_dev field is reached, the guest string is locked into host
 * memory and the host pointer stored directly into the converted struct.
 * Returns the ioctl result or -TARGET_EFAULT on a bad guest pointer.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                                int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Non-NULL device name: lock the guest string and keep
                   the host pointer for unlocking after the ioctl. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* struct rtentry always has an rt_dev field, so the loop above must
       have set both pointers. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
5504 
5505 static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
5506                                      int fd, int cmd, abi_long arg)
5507 {
5508     int sig = target_to_host_signal(arg);
5509     return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
5510 }
5511 
5512 static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
5513                                     int fd, int cmd, abi_long arg)
5514 {
5515     struct timeval tv;
5516     abi_long ret;
5517 
5518     ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
5519     if (is_error(ret)) {
5520         return ret;
5521     }
5522 
5523     if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
5524         if (copy_to_user_timeval(arg, &tv)) {
5525             return -TARGET_EFAULT;
5526         }
5527     } else {
5528         if (copy_to_user_timeval64(arg, &tv)) {
5529             return -TARGET_EFAULT;
5530         }
5531     }
5532 
5533     return ret;
5534 }
5535 
5536 static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
5537                                       int fd, int cmd, abi_long arg)
5538 {
5539     struct timespec ts;
5540     abi_long ret;
5541 
5542     ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
5543     if (is_error(ret)) {
5544         return ret;
5545     }
5546 
5547     if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
5548         if (host_to_target_timespec(arg, &ts)) {
5549             return -TARGET_EFAULT;
5550         }
5551     } else{
5552         if (host_to_target_timespec64(arg, &ts)) {
5553             return -TARGET_EFAULT;
5554         }
5555     }
5556 
5557     return ret;
5558 }
5559 
5560 #ifdef TIOCGPTPEER
5561 static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
5562                                      int fd, int cmd, abi_long arg)
5563 {
5564     int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
5565     return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
5566 }
5567 #endif
5568 
5569 #ifdef HAVE_DRM_H
5570 
5571 static void unlock_drm_version(struct drm_version *host_ver,
5572                                struct target_drm_version *target_ver,
5573                                bool copy)
5574 {
5575     unlock_user(host_ver->name, target_ver->name,
5576                                 copy ? host_ver->name_len : 0);
5577     unlock_user(host_ver->date, target_ver->date,
5578                                 copy ? host_ver->date_len : 0);
5579     unlock_user(host_ver->desc, target_ver->desc,
5580                                 copy ? host_ver->desc_len : 0);
5581 }
5582 
/*
 * Prepare a host struct drm_version from the guest's target_drm_version:
 * fetch the three buffer lengths and lock the corresponding guest buffers
 * writable so the host kernel can fill them.  Returns 0 on success or
 * -EFAULT; on failure any buffers locked so far are released without
 * copy-back via unlock_drm_version().
 *
 * NOTE(review): the lengths/addresses passed to lock_user() are read
 * straight from target_ver (no tswap), while the validated lengths are
 * fetched with __get_user() into host_ver.  Presumably host_ver->*_len
 * was intended for the lock_user() size on cross-endian configurations;
 * verify against a big-endian-guest/little-endian-host setup.
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;
err:
    /* Unlock whatever was locked; host_ver was zeroed, so fields never
       locked are NULL and unlock_user() of them is harmless. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
5620 
/*
 * Copy the results of a successful DRM_IOCTL_VERSION back to the guest:
 * the three version numbers and string lengths are byte-swapped into
 * target_ver, then the locked string buffers are released with copy-back
 * so the kernel-written name/date/desc reach guest memory.
 */
static inline void host_to_target_drmversion(
                                          struct target_drm_version *target_ver,
                                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
5633 
/*
 * Handle DRM ioctls that need special argument handling.  Currently only
 * DRM_IOCTL_VERSION: its struct drm_version carries three (pointer, len)
 * string buffers that must be bounced through host memory.  Any other
 * command reports -TARGET_ENOSYS.
 */
static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
                             int fd, int cmd, abi_long arg)
{
    struct drm_version *ver;
    struct target_drm_version *target_ver;
    abi_long ret;

    switch (ie->host_cmd) {
    case DRM_IOCTL_VERSION:
        if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
            return -TARGET_EFAULT;
        }
        ver = (struct drm_version *)buf_temp;
        ret = target_to_host_drmversion(ver, target_ver);
        if (!is_error(ret)) {
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
            if (is_error(ret)) {
                /* ioctl failed: release buffers without copy-back */
                unlock_drm_version(ver, target_ver, false);
            } else {
                /* success: write results and string contents to guest */
                host_to_target_drmversion(target_ver, ver);
            }
        }
        unlock_user_struct(target_ver, arg, 0);
        return ret;
    }
    return -TARGET_ENOSYS;
}
5661 
5662 static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
5663                                            struct drm_i915_getparam *gparam,
5664                                            int fd, abi_long arg)
5665 {
5666     abi_long ret;
5667     int value;
5668     struct target_drm_i915_getparam *target_gparam;
5669 
5670     if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
5671         return -TARGET_EFAULT;
5672     }
5673 
5674     __get_user(gparam->param, &target_gparam->param);
5675     gparam->value = &value;
5676     ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
5677     put_user_s32(value, target_gparam->value);
5678 
5679     unlock_user_struct(target_gparam, arg, 0);
5680     return ret;
5681 }
5682 
5683 static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
5684                                   int fd, int cmd, abi_long arg)
5685 {
5686     switch (ie->host_cmd) {
5687     case DRM_IOCTL_I915_GETPARAM:
5688         return do_ioctl_drm_i915_getparam(ie,
5689                                           (struct drm_i915_getparam *)buf_temp,
5690                                           fd, arg);
5691     default:
5692         return -TARGET_ENOSYS;
5693     }
5694 }
5695 
5696 #endif
5697 
/*
 * Handle TUNSETTXFILTER.  struct tun_filter carries a variable-length
 * array of MAC addresses after the fixed header, which the table-driven
 * thunk conversion cannot describe.  The 16-bit flags/count fields are
 * byte-swapped; the addresses are raw bytes copied verbatim.
 * Returns the ioctl result or -TARGET_EFAULT on a bad pointer or a
 * count too large for the bounce buffer.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* buf_temp is only MAX_STRUCT_SIZE bytes: refuse a count whose
           address array would overflow the bounce buffer. */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
5733 
/* Table of emulated ioctls, expanded from ioctls.h.  Each entry maps a
 * target command to the host command, name, access mode, optional
 * special-case handler and argument type description.  IOCTL_IGNORE
 * entries have host_cmd == 0 (do_ioctl() returns -TARGET_ENOTTY for
 * them); the table ends with a zero target_cmd sentinel that the
 * do_ioctl() lookup loop relies on. */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, {  __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...)                      \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, {  __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
5744 
5745 /* ??? Implement proper locking for ioctls.  */
5746 /* do_ioctl() Must return target values and target errnos. */
/*
 * Emulate ioctl(2) for the guest.  Look up 'cmd' in ioctl_entries[];
 * entries with a special handler are dispatched through ie->do_ioctl,
 * otherwise the argument is converted between target and host layout
 * with the thunk machinery according to the entry's access mode.
 * Unknown commands are logged (LOG_UNIMP) and fail with -TARGET_ENOTTY.
 */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search; the table ends with a zero target_cmd sentinel. */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* IOCTL_SPECIAL entry: delegate to its custom handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux.  */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* scalar argument: passed through unchanged */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* kernel writes only: run the ioctl on the bounce buffer,
               then convert the result out to guest memory on success */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* kernel reads only: convert guest data in, no copy-back */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* read-write: convert in, run, convert back on success */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
5836 
5837 static const bitmask_transtbl iflag_tbl[] = {
5838         { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
5839         { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
5840         { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
5841         { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
5842         { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
5843         { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
5844         { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
5845         { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
5846         { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
5847         { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
5848         { TARGET_IXON, TARGET_IXON, IXON, IXON },
5849         { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
5850         { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
5851         { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
5852         { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
5853         { 0, 0, 0, 0 }
5854 };
5855 
5856 static const bitmask_transtbl oflag_tbl[] = {
5857 	{ TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
5858 	{ TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
5859 	{ TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
5860 	{ TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
5861 	{ TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
5862 	{ TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
5863 	{ TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
5864 	{ TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
5865 	{ TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
5866 	{ TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
5867 	{ TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
5868 	{ TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
5869 	{ TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
5870 	{ TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
5871 	{ TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
5872 	{ TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
5873 	{ TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
5874 	{ TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
5875 	{ TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
5876 	{ TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
5877 	{ TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
5878 	{ TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
5879 	{ TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
5880 	{ TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
5881 	{ 0, 0, 0, 0 }
5882 };
5883 
5884 static const bitmask_transtbl cflag_tbl[] = {
5885 	{ TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
5886 	{ TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
5887 	{ TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
5888 	{ TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
5889 	{ TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
5890 	{ TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
5891 	{ TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
5892 	{ TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
5893 	{ TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
5894 	{ TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
5895 	{ TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
5896 	{ TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
5897 	{ TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
5898 	{ TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
5899 	{ TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
5900 	{ TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
5901 	{ TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
5902 	{ TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
5903 	{ TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
5904 	{ TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
5905 	{ TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
5906 	{ TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
5907 	{ TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
5908 	{ TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
5909 	{ TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
5910 	{ TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
5911 	{ TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
5912 	{ TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
5913 	{ TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
5914 	{ TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
5915 	{ TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
5916 	{ 0, 0, 0, 0 }
5917 };
5918 
5919 static const bitmask_transtbl lflag_tbl[] = {
5920   { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
5921   { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
5922   { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
5923   { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
5924   { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
5925   { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
5926   { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
5927   { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
5928   { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
5929   { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
5930   { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
5931   { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
5932   { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
5933   { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
5934   { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
5935   { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
5936   { 0, 0, 0, 0 }
5937 };
5938 
5939 static void target_to_host_termios (void *dst, const void *src)
5940 {
5941     struct host_termios *host = dst;
5942     const struct target_termios *target = src;
5943 
5944     host->c_iflag =
5945         target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
5946     host->c_oflag =
5947         target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
5948     host->c_cflag =
5949         target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
5950     host->c_lflag =
5951         target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
5952     host->c_line = target->c_line;
5953 
5954     memset(host->c_cc, 0, sizeof(host->c_cc));
5955     host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
5956     host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
5957     host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
5958     host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
5959     host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
5960     host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
5961     host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
5962     host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
5963     host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
5964     host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
5965     host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
5966     host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
5967     host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
5968     host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
5969     host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
5970     host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
5971     host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
5972 }
5973 
5974 static void host_to_target_termios (void *dst, const void *src)
5975 {
5976     struct target_termios *target = dst;
5977     const struct host_termios *host = src;
5978 
5979     target->c_iflag =
5980         tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
5981     target->c_oflag =
5982         tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
5983     target->c_cflag =
5984         tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
5985     target->c_lflag =
5986         tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
5987     target->c_line = host->c_line;
5988 
5989     memset(target->c_cc, 0, sizeof(target->c_cc));
5990     target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
5991     target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
5992     target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
5993     target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
5994     target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
5995     target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
5996     target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
5997     target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
5998     target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
5999     target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
6000     target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
6001     target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
6002     target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
6003     target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
6004     target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
6005     target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
6006     target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
6007 }
6008 
/* Thunk description for struct termios: conversion is done entirely by
 * the two custom convert callbacks above rather than a field list.
 * Note the convert[]/size[]/align[] ordering is { to-target, to-host }
 * (i.e. index 0 describes the target side). */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
6015 
/* mmap(2) flags: target <-> host bit mapping.  Flags with no host
 * equivalent (TARGET_MAP_STACK) map to 0 so they are dropped. */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
    { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host.  */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { 0, 0, 0, 0 }
};
6038 
6039 /*
6040  * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
6041  *       TARGET_I386 is defined if TARGET_X86_64 is defined
6042  */
6043 #if defined(TARGET_I386)
6044 
6045 /* NOTE: there is really one LDT for all the threads */
6046 static uint8_t *ldt_table;
6047 
6048 static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
6049 {
6050     int size;
6051     void *p;
6052 
6053     if (!ldt_table)
6054         return 0;
6055     size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
6056     if (size > bytecount)
6057         size = bytecount;
6058     p = lock_user(VERIFY_WRITE, ptr, size, 0);
6059     if (!p)
6060         return -TARGET_EFAULT;
6061     /* ??? Should this by byteswapped?  */
6062     memcpy(p, ldt_table, size);
6063     unlock_user(p, ptr, size);
6064     return size;
6065 }
6066 
6067 /* XXX: add locking support */
/*
 * Install or clear one entry in the emulated LDT (modify_ldt write path).
 * 'oldmode' selects the legacy (func==1) semantics: no 'useable' bit and
 * no code-in-expand-down segments.  The descriptor bit layout written at
 * the end mirrors the Linux kernel's encoding.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest's user_desc into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit positions as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }
    /* allocate the LDT */
    if (!ldt_table) {
        /* Lazily map one page-aligned region for all LDT entries. */
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0		&&
             read_exec_only == 1	&&
             seg_32bit == 0		&&
             limit_in_pages == 0	&&
             seg_not_present == 1	&&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of a descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes: two 32-bit words in target byte order. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6158 
6159 /* specific and weird i386 syscalls */
6160 static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
6161                               unsigned long bytecount)
6162 {
6163     abi_long ret;
6164 
6165     switch (func) {
6166     case 0:
6167         ret = read_ldt(ptr, bytecount);
6168         break;
6169     case 1:
6170         ret = write_ldt(env, ptr, bytecount, 1);
6171         break;
6172     case 0x11:
6173         ret = write_ldt(env, ptr, bytecount, 0);
6174         break;
6175     default:
6176         ret = -TARGET_ENOSYS;
6177         break;
6178     }
6179     return ret;
6180 }
6181 
6182 #if defined(TARGET_ABI32)
/*
 * Implement set_thread_area(): install a TLS descriptor into one of the
 * GDT's TLS slots.  If the guest passes entry_number == -1, the first
 * free TLS slot is chosen and written back to the guest struct.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Byte-swap the guest's user_desc into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    if (ldt_info.entry_number == -1) {
        /* Pick the first empty TLS slot and report it back to the guest. */
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Also catches the "no free slot found" case (entry_number still -1). */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
           return -TARGET_EINVAL;
    /* Unpack the flags word (same bit positions as the kernel's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif

    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Pack base/limit/flags into the two 32-bit halves of a descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
6267 
/*
 * Implement get_thread_area(): read the GDT TLS descriptor selected by
 * the guest's entry_number and decode it back into the user_desc layout
 * (inverse of the packing done in do_set_thread_area()).
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Read the two 32-bit halves of the descriptor in host order. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);

    /* Decode descriptor bits; note read_exec_only/seg_not_present are
       stored inverted in the descriptor. */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Re-pack into the user_desc flags word. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2  & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    /* Write the decoded fields back to the guest in target byte order. */
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
6314 
/* arch_prctl() is not provided for the 32-bit ABI. */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
6319 #else
6320 abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
6321 {
6322     abi_long ret = 0;
6323     abi_ulong val;
6324     int idx;
6325 
6326     switch(code) {
6327     case TARGET_ARCH_SET_GS:
6328     case TARGET_ARCH_SET_FS:
6329         if (code == TARGET_ARCH_SET_GS)
6330             idx = R_GS;
6331         else
6332             idx = R_FS;
6333         cpu_x86_load_seg(env, idx, 0);
6334         env->segs[idx].base = addr;
6335         break;
6336     case TARGET_ARCH_GET_GS:
6337     case TARGET_ARCH_GET_FS:
6338         if (code == TARGET_ARCH_GET_GS)
6339             idx = R_GS;
6340         else
6341             idx = R_FS;
6342         val = env->segs[idx].base;
6343         if (put_user(val, addr, abi_ulong))
6344             ret = -TARGET_EFAULT;
6345         break;
6346     default:
6347         ret = -TARGET_EINVAL;
6348         break;
6349     }
6350     return ret;
6351 }
#endif /* defined(TARGET_ABI32) */
6353 #endif /* defined(TARGET_I386) */
6354 
6355 /*
6356  * These constants are generic.  Supply any that are missing from the host.
6357  */
6358 #ifndef PR_SET_NAME
6359 # define PR_SET_NAME    15
6360 # define PR_GET_NAME    16
6361 #endif
6362 #ifndef PR_SET_FP_MODE
6363 # define PR_SET_FP_MODE 45
6364 # define PR_GET_FP_MODE 46
6365 # define PR_FP_MODE_FR   (1 << 0)
6366 # define PR_FP_MODE_FRE  (1 << 1)
6367 #endif
6368 #ifndef PR_SVE_SET_VL
6369 # define PR_SVE_SET_VL  50
6370 # define PR_SVE_GET_VL  51
6371 # define PR_SVE_VL_LEN_MASK  0xffff
6372 # define PR_SVE_VL_INHERIT   (1 << 17)
6373 #endif
6374 #ifndef PR_PAC_RESET_KEYS
6375 # define PR_PAC_RESET_KEYS  54
6376 # define PR_PAC_APIAKEY   (1 << 0)
6377 # define PR_PAC_APIBKEY   (1 << 1)
6378 # define PR_PAC_APDAKEY   (1 << 2)
6379 # define PR_PAC_APDBKEY   (1 << 3)
6380 # define PR_PAC_APGAKEY   (1 << 4)
6381 #endif
6382 #ifndef PR_SET_TAGGED_ADDR_CTRL
6383 # define PR_SET_TAGGED_ADDR_CTRL 55
6384 # define PR_GET_TAGGED_ADDR_CTRL 56
6385 # define PR_TAGGED_ADDR_ENABLE  (1UL << 0)
6386 #endif
6387 #ifndef PR_MTE_TCF_SHIFT
6388 # define PR_MTE_TCF_SHIFT       1
6389 # define PR_MTE_TCF_NONE        (0UL << PR_MTE_TCF_SHIFT)
6390 # define PR_MTE_TCF_SYNC        (1UL << PR_MTE_TCF_SHIFT)
6391 # define PR_MTE_TCF_ASYNC       (2UL << PR_MTE_TCF_SHIFT)
6392 # define PR_MTE_TCF_MASK        (3UL << PR_MTE_TCF_SHIFT)
6393 # define PR_MTE_TAG_SHIFT       3
6394 # define PR_MTE_TAG_MASK        (0xffffUL << PR_MTE_TAG_SHIFT)
6395 #endif
6396 #ifndef PR_SET_IO_FLUSHER
6397 # define PR_SET_IO_FLUSHER 57
6398 # define PR_GET_IO_FLUSHER 58
6399 #endif
6400 #ifndef PR_SET_SYSCALL_USER_DISPATCH
6401 # define PR_SET_SYSCALL_USER_DISPATCH 59
6402 #endif
6403 #ifndef PR_SME_SET_VL
6404 # define PR_SME_SET_VL  63
6405 # define PR_SME_GET_VL  64
6406 # define PR_SME_VL_LEN_MASK  0xffff
6407 # define PR_SME_VL_INHERIT   (1 << 17)
6408 #endif
6409 
6410 #include "target_prctl.h"
6411 
/* Default hook for zero-argument prctl options a target does not implement. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
6416 
/* Default hook for one-argument prctl options a target does not implement. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
6421 
6422 #ifndef do_prctl_get_fp_mode
6423 #define do_prctl_get_fp_mode do_prctl_inval0
6424 #endif
6425 #ifndef do_prctl_set_fp_mode
6426 #define do_prctl_set_fp_mode do_prctl_inval1
6427 #endif
6428 #ifndef do_prctl_sve_get_vl
6429 #define do_prctl_sve_get_vl do_prctl_inval0
6430 #endif
6431 #ifndef do_prctl_sve_set_vl
6432 #define do_prctl_sve_set_vl do_prctl_inval1
6433 #endif
6434 #ifndef do_prctl_reset_keys
6435 #define do_prctl_reset_keys do_prctl_inval1
6436 #endif
6437 #ifndef do_prctl_set_tagged_addr_ctrl
6438 #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
6439 #endif
6440 #ifndef do_prctl_get_tagged_addr_ctrl
6441 #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
6442 #endif
6443 #ifndef do_prctl_get_unalign
6444 #define do_prctl_get_unalign do_prctl_inval1
6445 #endif
6446 #ifndef do_prctl_set_unalign
6447 #define do_prctl_set_unalign do_prctl_inval1
6448 #endif
6449 #ifndef do_prctl_sme_get_vl
6450 #define do_prctl_sme_get_vl do_prctl_inval0
6451 #endif
6452 #ifndef do_prctl_sme_set_vl
6453 #define do_prctl_sme_set_vl do_prctl_inval1
6454 #endif
6455 
/*
 * Implement the prctl() syscall.  Options whose arguments need
 * translation (signals, guest pointers) are handled explicitly;
 * per-architecture options go through the do_prctl_* hooks (which
 * default to returning -TARGET_EINVAL above); pointer-free options are
 * passed straight through to the host; and options that could interfere
 * with the emulation itself are rejected.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            /* Translate the host signal number before handing it back. */
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The kernel's task name buffer is 16 bytes (TASK_COMM_LEN). */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    /* Per-architecture hooks; default to -TARGET_EINVAL (see above). */
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
    case PR_GET_TID_ADDRESS:
        /* TODO */
        return -TARGET_EINVAL;

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
6587 
/* Host stack size for threads created via clone() emulation. */
#define NEW_STACK_SIZE 0x40000


/* Serializes thread creation so that setup appears atomic (see do_fork). */
static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Per-clone handshake data shared between do_fork() and clone_func(). */
typedef struct {
    CPUArchState *env;
    pthread_mutex_t mutex;   /* protects the startup handshake */
    pthread_cond_t cond;     /* signalled by the child when ready */
    pthread_t thread;
    uint32_t tid;            /* child's host tid, filled in by the child */
    abi_ulong child_tidptr;  /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr; /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;        /* parent's signal mask to restore in child */
} new_thread_info;
6602 
/*
 * Host-thread entry point for CLONE_VM clones.  Registers the new thread
 * with RCU/TCG, publishes its tid, signals the parent that startup is
 * complete, then waits for the parent to finish TLS setup (by taking
 * clone_lock, which the parent holds) before entering the CPU loop.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = (TaskState *)cpu->opaque;
    info->tid = sys_gettid();
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID if requested. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
6636 
6637 /* do_fork() Must return host values and target errnos (unlike most
6638    do_*() functions). */
6639 static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
6640                    abi_ulong parent_tidptr, target_ulong newtls,
6641                    abi_ulong child_tidptr)
6642 {
6643     CPUState *cpu = env_cpu(env);
6644     int ret;
6645     TaskState *ts;
6646     CPUState *new_cpu;
6647     CPUArchState *new_env;
6648     sigset_t sigmask;
6649 
6650     flags &= ~CLONE_IGNORED_FLAGS;
6651 
6652     /* Emulate vfork() with fork() */
6653     if (flags & CLONE_VFORK)
6654         flags &= ~(CLONE_VFORK | CLONE_VM);
6655 
6656     if (flags & CLONE_VM) {
6657         TaskState *parent_ts = (TaskState *)cpu->opaque;
6658         new_thread_info info;
6659         pthread_attr_t attr;
6660 
6661         if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
6662             (flags & CLONE_INVALID_THREAD_FLAGS)) {
6663             return -TARGET_EINVAL;
6664         }
6665 
6666         ts = g_new0(TaskState, 1);
6667         init_task_state(ts);
6668 
6669         /* Grab a mutex so that thread setup appears atomic.  */
6670         pthread_mutex_lock(&clone_lock);
6671 
6672         /*
6673          * If this is our first additional thread, we need to ensure we
6674          * generate code for parallel execution and flush old translations.
6675          * Do this now so that the copy gets CF_PARALLEL too.
6676          */
6677         if (!(cpu->tcg_cflags & CF_PARALLEL)) {
6678             cpu->tcg_cflags |= CF_PARALLEL;
6679             tb_flush(cpu);
6680         }
6681 
6682         /* we create a new CPU instance. */
6683         new_env = cpu_copy(env);
6684         /* Init regs that differ from the parent.  */
6685         cpu_clone_regs_child(new_env, newsp, flags);
6686         cpu_clone_regs_parent(env, flags);
6687         new_cpu = env_cpu(new_env);
6688         new_cpu->opaque = ts;
6689         ts->bprm = parent_ts->bprm;
6690         ts->info = parent_ts->info;
6691         ts->signal_mask = parent_ts->signal_mask;
6692 
6693         if (flags & CLONE_CHILD_CLEARTID) {
6694             ts->child_tidptr = child_tidptr;
6695         }
6696 
6697         if (flags & CLONE_SETTLS) {
6698             cpu_set_tls (new_env, newtls);
6699         }
6700 
6701         memset(&info, 0, sizeof(info));
6702         pthread_mutex_init(&info.mutex, NULL);
6703         pthread_mutex_lock(&info.mutex);
6704         pthread_cond_init(&info.cond, NULL);
6705         info.env = new_env;
6706         if (flags & CLONE_CHILD_SETTID) {
6707             info.child_tidptr = child_tidptr;
6708         }
6709         if (flags & CLONE_PARENT_SETTID) {
6710             info.parent_tidptr = parent_tidptr;
6711         }
6712 
6713         ret = pthread_attr_init(&attr);
6714         ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
6715         ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
6716         /* It is not safe to deliver signals until the child has finished
6717            initializing, so temporarily block all signals.  */
6718         sigfillset(&sigmask);
6719         sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
6720         cpu->random_seed = qemu_guest_random_seed_thread_part1();
6721 
6722         ret = pthread_create(&info.thread, &attr, clone_func, &info);
6723         /* TODO: Free new CPU state if thread creation failed.  */
6724 
6725         sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
6726         pthread_attr_destroy(&attr);
6727         if (ret == 0) {
6728             /* Wait for the child to initialize.  */
6729             pthread_cond_wait(&info.cond, &info.mutex);
6730             ret = info.tid;
6731         } else {
6732             ret = -1;
6733         }
6734         pthread_mutex_unlock(&info.mutex);
6735         pthread_cond_destroy(&info.cond);
6736         pthread_mutex_destroy(&info.mutex);
6737         pthread_mutex_unlock(&clone_lock);
6738     } else {
6739         /* if no CLONE_VM, we consider it is a fork */
6740         if (flags & CLONE_INVALID_FORK_FLAGS) {
6741             return -TARGET_EINVAL;
6742         }
6743 
6744         /* We can't support custom termination signals */
6745         if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
6746             return -TARGET_EINVAL;
6747         }
6748 
6749 #if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
6750         if (flags & CLONE_PIDFD) {
6751             return -TARGET_EINVAL;
6752         }
6753 #endif
6754 
6755         /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
6756         if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
6757             return -TARGET_EINVAL;
6758         }
6759 
6760         if (block_signals()) {
6761             return -QEMU_ERESTARTSYS;
6762         }
6763 
6764         fork_start();
6765         ret = fork();
6766         if (ret == 0) {
6767             /* Child Process.  */
6768             cpu_clone_regs_child(env, newsp, flags);
6769             fork_end(1);
6770             /* There is a race condition here.  The parent process could
6771                theoretically read the TID in the child process before the child
6772                tid is set.  This would require using either ptrace
6773                (not implemented) or having *_tidptr to point at a shared memory
6774                mapping.  We can't repeat the spinlock hack used above because
6775                the child process gets its own copy of the lock.  */
6776             if (flags & CLONE_CHILD_SETTID)
6777                 put_user_u32(sys_gettid(), child_tidptr);
6778             if (flags & CLONE_PARENT_SETTID)
6779                 put_user_u32(sys_gettid(), parent_tidptr);
6780             ts = (TaskState *)cpu->opaque;
6781             if (flags & CLONE_SETTLS)
6782                 cpu_set_tls (env, newtls);
6783             if (flags & CLONE_CHILD_CLEARTID)
6784                 ts->child_tidptr = child_tidptr;
6785         } else {
6786             cpu_clone_regs_parent(env, flags);
6787             if (flags & CLONE_PIDFD) {
6788                 int pid_fd = 0;
6789 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
6790                 int pid_child = ret;
6791                 pid_fd = pidfd_open(pid_child, 0);
6792                 if (pid_fd >= 0) {
6793                         fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
6794                                                | FD_CLOEXEC);
6795                 } else {
6796                         pid_fd = 0;
6797                 }
6798 #endif
6799                 put_user_u32(pid_fd, parent_tidptr);
6800                 }
6801             fork_end(0);
6802         }
6803         g_assert(!cpu_in_exclusive_context(cpu));
6804     }
6805     return ret;
6806 }
6807 
6808 /* warning : doesn't handle linux specific flags... */
/*
 * Translate a target fcntl() command number into the host's.
 * Commands with identical numbering pass through; the plain lock
 * commands are widened to the 64-bit variants on the host.  Returns
 * -TARGET_EINVAL for commands we do not support.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW64;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    case TARGET_F_GETLK64:
        ret = F_GETLK64;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK64;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW64;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK64 && ret <= F_SETLKW64) {
        ret -= F_GETLK64 - 5;
    }
#endif

    return ret;
}
6914 
6915 #define FLOCK_TRANSTBL \
6916     switch (type) { \
6917     TRANSTBL_CONVERT(F_RDLCK); \
6918     TRANSTBL_CONVERT(F_WRLCK); \
6919     TRANSTBL_CONVERT(F_UNLCK); \
6920     }
6921 
6922 static int target_to_host_flock(int type)
6923 {
6924 #define TRANSTBL_CONVERT(a) case TARGET_##a: return a
6925     FLOCK_TRANSTBL
6926 #undef  TRANSTBL_CONVERT
6927     return -TARGET_EINVAL;
6928 }
6929 
/* Map a host flock l_type to the target value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef  TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
6940 
6941 static inline abi_long copy_from_user_flock(struct flock64 *fl,
6942                                             abi_ulong target_flock_addr)
6943 {
6944     struct target_flock *target_fl;
6945     int l_type;
6946 
6947     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
6948         return -TARGET_EFAULT;
6949     }
6950 
6951     __get_user(l_type, &target_fl->l_type);
6952     l_type = target_to_host_flock(l_type);
6953     if (l_type < 0) {
6954         return l_type;
6955     }
6956     fl->l_type = l_type;
6957     __get_user(fl->l_whence, &target_fl->l_whence);
6958     __get_user(fl->l_start, &target_fl->l_start);
6959     __get_user(fl->l_len, &target_fl->l_len);
6960     __get_user(fl->l_pid, &target_fl->l_pid);
6961     unlock_user_struct(target_fl, target_flock_addr, 0);
6962     return 0;
6963 }
6964 
6965 static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
6966                                           const struct flock64 *fl)
6967 {
6968     struct target_flock *target_fl;
6969     short l_type;
6970 
6971     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
6972         return -TARGET_EFAULT;
6973     }
6974 
6975     l_type = host_to_target_flock(fl->l_type);
6976     __put_user(l_type, &target_fl->l_type);
6977     __put_user(fl->l_whence, &target_fl->l_whence);
6978     __put_user(fl->l_start, &target_fl->l_start);
6979     __put_user(fl->l_len, &target_fl->l_len);
6980     __put_user(fl->l_pid, &target_fl->l_pid);
6981     unlock_user_struct(target_fl, target_flock_addr, 1);
6982     return 0;
6983 }
6984 
/* Signatures of the flock64 copy helpers, presumably so callers can pick
 * a conversion variant (e.g. the ARM OABI one below) at run time —
 * callers are outside this view; verify. */
typedef abi_long from_flock64_fn(struct flock64 *fl, abi_ulong target_addr);
typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock64 *fl);
6987 
6988 #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/* struct flock64 layout used by old-ABI (OABI) ARM guests: the struct is
 * packed, so l_start is not aligned to an 8-byte boundary as it is in the
 * EABI layout. */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
6996 
6997 static inline abi_long copy_from_user_oabi_flock64(struct flock64 *fl,
6998                                                    abi_ulong target_flock_addr)
6999 {
7000     struct target_oabi_flock64 *target_fl;
7001     int l_type;
7002 
7003     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7004         return -TARGET_EFAULT;
7005     }
7006 
7007     __get_user(l_type, &target_fl->l_type);
7008     l_type = target_to_host_flock(l_type);
7009     if (l_type < 0) {
7010         return l_type;
7011     }
7012     fl->l_type = l_type;
7013     __get_user(fl->l_whence, &target_fl->l_whence);
7014     __get_user(fl->l_start, &target_fl->l_start);
7015     __get_user(fl->l_len, &target_fl->l_len);
7016     __get_user(fl->l_pid, &target_fl->l_pid);
7017     unlock_user_struct(target_fl, target_flock_addr, 0);
7018     return 0;
7019 }
7020 
7021 static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
7022                                                  const struct flock64 *fl)
7023 {
7024     struct target_oabi_flock64 *target_fl;
7025     short l_type;
7026 
7027     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7028         return -TARGET_EFAULT;
7029     }
7030 
7031     l_type = host_to_target_flock(fl->l_type);
7032     __put_user(l_type, &target_fl->l_type);
7033     __put_user(fl->l_whence, &target_fl->l_whence);
7034     __put_user(fl->l_start, &target_fl->l_start);
7035     __put_user(fl->l_len, &target_fl->l_len);
7036     __put_user(fl->l_pid, &target_fl->l_pid);
7037     unlock_user_struct(target_fl, target_flock_addr, 1);
7038     return 0;
7039 }
7040 #endif
7041 
7042 static inline abi_long copy_from_user_flock64(struct flock64 *fl,
7043                                               abi_ulong target_flock_addr)
7044 {
7045     struct target_flock64 *target_fl;
7046     int l_type;
7047 
7048     if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
7049         return -TARGET_EFAULT;
7050     }
7051 
7052     __get_user(l_type, &target_fl->l_type);
7053     l_type = target_to_host_flock(l_type);
7054     if (l_type < 0) {
7055         return l_type;
7056     }
7057     fl->l_type = l_type;
7058     __get_user(fl->l_whence, &target_fl->l_whence);
7059     __get_user(fl->l_start, &target_fl->l_start);
7060     __get_user(fl->l_len, &target_fl->l_len);
7061     __get_user(fl->l_pid, &target_fl->l_pid);
7062     unlock_user_struct(target_fl, target_flock_addr, 0);
7063     return 0;
7064 }
7065 
7066 static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
7067                                             const struct flock64 *fl)
7068 {
7069     struct target_flock64 *target_fl;
7070     short l_type;
7071 
7072     if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
7073         return -TARGET_EFAULT;
7074     }
7075 
7076     l_type = host_to_target_flock(fl->l_type);
7077     __put_user(l_type, &target_fl->l_type);
7078     __put_user(fl->l_whence, &target_fl->l_whence);
7079     __put_user(fl->l_start, &target_fl->l_start);
7080     __put_user(fl->l_len, &target_fl->l_len);
7081     __put_user(fl->l_pid, &target_fl->l_pid);
7082     unlock_user_struct(target_fl, target_flock_addr, 1);
7083     return 0;
7084 }
7085 
7086 static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
7087 {
7088     struct flock64 fl64;
7089 #ifdef F_GETOWN_EX
7090     struct f_owner_ex fox;
7091     struct target_f_owner_ex *target_fox;
7092 #endif
7093     abi_long ret;
7094     int host_cmd = target_to_host_fcntl_cmd(cmd);
7095 
7096     if (host_cmd == -TARGET_EINVAL)
7097 	    return host_cmd;
7098 
7099     switch(cmd) {
7100     case TARGET_F_GETLK:
7101         ret = copy_from_user_flock(&fl64, arg);
7102         if (ret) {
7103             return ret;
7104         }
7105         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7106         if (ret == 0) {
7107             ret = copy_to_user_flock(arg, &fl64);
7108         }
7109         break;
7110 
7111     case TARGET_F_SETLK:
7112     case TARGET_F_SETLKW:
7113         ret = copy_from_user_flock(&fl64, arg);
7114         if (ret) {
7115             return ret;
7116         }
7117         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7118         break;
7119 
7120     case TARGET_F_GETLK64:
7121     case TARGET_F_OFD_GETLK:
7122         ret = copy_from_user_flock64(&fl64, arg);
7123         if (ret) {
7124             return ret;
7125         }
7126         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7127         if (ret == 0) {
7128             ret = copy_to_user_flock64(arg, &fl64);
7129         }
7130         break;
7131     case TARGET_F_SETLK64:
7132     case TARGET_F_SETLKW64:
7133     case TARGET_F_OFD_SETLK:
7134     case TARGET_F_OFD_SETLKW:
7135         ret = copy_from_user_flock64(&fl64, arg);
7136         if (ret) {
7137             return ret;
7138         }
7139         ret = get_errno(safe_fcntl(fd, host_cmd, &fl64));
7140         break;
7141 
7142     case TARGET_F_GETFL:
7143         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7144         if (ret >= 0) {
7145             ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
7146             /* tell 32-bit guests it uses largefile on 64-bit hosts: */
7147             if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
7148                 ret |= TARGET_O_LARGEFILE;
7149             }
7150         }
7151         break;
7152 
7153     case TARGET_F_SETFL:
7154         ret = get_errno(safe_fcntl(fd, host_cmd,
7155                                    target_to_host_bitmask(arg,
7156                                                           fcntl_flags_tbl)));
7157         break;
7158 
7159 #ifdef F_GETOWN_EX
7160     case TARGET_F_GETOWN_EX:
7161         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7162         if (ret >= 0) {
7163             if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
7164                 return -TARGET_EFAULT;
7165             target_fox->type = tswap32(fox.type);
7166             target_fox->pid = tswap32(fox.pid);
7167             unlock_user_struct(target_fox, arg, 1);
7168         }
7169         break;
7170 #endif
7171 
7172 #ifdef F_SETOWN_EX
7173     case TARGET_F_SETOWN_EX:
7174         if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
7175             return -TARGET_EFAULT;
7176         fox.type = tswap32(target_fox->type);
7177         fox.pid = tswap32(target_fox->pid);
7178         unlock_user_struct(target_fox, arg, 0);
7179         ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
7180         break;
7181 #endif
7182 
7183     case TARGET_F_SETSIG:
7184         ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
7185         break;
7186 
7187     case TARGET_F_GETSIG:
7188         ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
7189         break;
7190 
7191     case TARGET_F_SETOWN:
7192     case TARGET_F_GETOWN:
7193     case TARGET_F_SETLEASE:
7194     case TARGET_F_GETLEASE:
7195     case TARGET_F_SETPIPE_SZ:
7196     case TARGET_F_GETPIPE_SZ:
7197     case TARGET_F_ADD_SEALS:
7198     case TARGET_F_GET_SEALS:
7199         ret = get_errno(safe_fcntl(fd, host_cmd, arg));
7200         break;
7201 
7202     default:
7203         ret = get_errno(safe_fcntl(fd, cmd, arg));
7204         break;
7205     }
7206     return ret;
7207 }
7208 
7209 #ifdef USE_UID16
7210 
/* Clamp a 32-bit uid into the 16-bit range; ids that do not fit map to
 * the overflow id 65534. */
static inline int high2lowuid(int uid)
{
    return uid > 65535 ? 65534 : uid;
}
7218 
/* Clamp a 32-bit gid into the 16-bit range; ids that do not fit map to
 * the overflow id 65534. */
static inline int high2lowgid(int gid)
{
    return gid > 65535 ? 65534 : gid;
}
7226 
/* Widen a 16-bit uid: the 16-bit "no change" sentinel 0xffff becomes the
 * 32-bit sentinel -1; all other values pass through. */
static inline int low2highuid(int uid)
{
    return (int16_t)uid == -1 ? -1 : uid;
}
7234 
/* Widen a 16-bit gid: the 16-bit "no change" sentinel 0xffff becomes the
 * 32-bit sentinel -1; all other values pass through. */
static inline int low2highgid(int gid)
{
    return (int16_t)gid == -1 ? -1 : gid;
}
/* Byte-swap a 16-bit uid/gid between guest and host byte order. */
static inline int tswapid(int id)
{
    return tswap16(id);
}

/* Store a 16-bit id into guest memory (USE_UID16 targets). */
#define put_user_id(x, gaddr) put_user_u16(x, gaddr)
7248 
7249 #else /* !USE_UID16 */
/* Without USE_UID16 the target uses full 32-bit ids, so the 16-bit
 * clamping/widening helpers reduce to identity functions and ids are
 * swapped and stored as 32-bit quantities. */
static inline int high2lowuid(int uid)
{
    return uid;
}
static inline int high2lowgid(int gid)
{
    return gid;
}
static inline int low2highuid(int uid)
{
    return uid;
}
static inline int low2highgid(int gid)
{
    return gid;
}
static inline int tswapid(int id)
{
    return tswap32(id);
}

#define put_user_id(x, gaddr) put_user_u32(x, gaddr)
7272 
7273 #endif /* USE_UID16 */
7274 
7275 /* We must do direct syscalls for setting UID/GID, because we want to
7276  * implement the Linux system call semantics of "change only for this thread",
7277  * not the libc/POSIX semantics of "change for all threads in process".
7278  * (See http://ewontfix.com/17/ for more details.)
7279  * We use the 32-bit version of the syscalls if present; if it is not
7280  * then either the host architecture supports 32-bit UIDs natively with
7281  * the standard syscall, or the 16-bit UID is the best we can do.
7282  */
/* Prefer the explicit 32-bit-id syscall numbers when the host defines
 * them; otherwise fall back to the plain variants. */
#ifdef __NR_setuid32
#define __NR_sys_setuid __NR_setuid32
#else
#define __NR_sys_setuid __NR_setuid
#endif
#ifdef __NR_setgid32
#define __NR_sys_setgid __NR_setgid32
#else
#define __NR_sys_setgid __NR_setgid
#endif
#ifdef __NR_setresuid32
#define __NR_sys_setresuid __NR_setresuid32
#else
#define __NR_sys_setresuid __NR_setresuid
#endif
#ifdef __NR_setresgid32
#define __NR_sys_setresgid __NR_setresgid32
#else
#define __NR_sys_setresgid __NR_setresgid
#endif

/* Raw syscall wrappers: per-thread id changes, see the comment above for
 * why libc cannot be used here. */
_syscall1(int, sys_setuid, uid_t, uid)
_syscall1(int, sys_setgid, gid_t, gid)
_syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
_syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
7308 
/*
 * One-time initialisation of the syscall layer: register every marshalled
 * struct description with the thunk machinery, then fix up ioctl request
 * numbers whose size field depends on the struct layout.
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);

/* Expand syscall_types.h once, registering each STRUCT/STRUCT_SPECIAL
 * entry with the thunk type system. */
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            arg_type = ie->arg_type;
            /* The size placeholder only makes sense for pointer args. */
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            /* Splice the computed struct size into the request number. */
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
7353 
7354 #ifdef TARGET_NR_truncate64
7355 static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
7356                                          abi_long arg2,
7357                                          abi_long arg3,
7358                                          abi_long arg4)
7359 {
7360     if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
7361         arg2 = arg3;
7362         arg3 = arg4;
7363     }
7364     return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
7365 }
7366 #endif
7367 
7368 #ifdef TARGET_NR_ftruncate64
7369 static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
7370                                           abi_long arg2,
7371                                           abi_long arg3,
7372                                           abi_long arg4)
7373 {
7374     if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
7375         arg2 = arg3;
7376         arg3 = arg4;
7377     }
7378     return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
7379 }
7380 #endif
7381 
7382 #if defined(TARGET_NR_timer_settime) || \
7383     (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
7384 static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
7385                                                  abi_ulong target_addr)
7386 {
7387     if (target_to_host_timespec(&host_its->it_interval, target_addr +
7388                                 offsetof(struct target_itimerspec,
7389                                          it_interval)) ||
7390         target_to_host_timespec(&host_its->it_value, target_addr +
7391                                 offsetof(struct target_itimerspec,
7392                                          it_value))) {
7393         return -TARGET_EFAULT;
7394     }
7395 
7396     return 0;
7397 }
7398 #endif
7399 
7400 #if defined(TARGET_NR_timer_settime64) || \
7401     (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
7402 static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
7403                                                    abi_ulong target_addr)
7404 {
7405     if (target_to_host_timespec64(&host_its->it_interval, target_addr +
7406                                   offsetof(struct target__kernel_itimerspec,
7407                                            it_interval)) ||
7408         target_to_host_timespec64(&host_its->it_value, target_addr +
7409                                   offsetof(struct target__kernel_itimerspec,
7410                                            it_value))) {
7411         return -TARGET_EFAULT;
7412     }
7413 
7414     return 0;
7415 }
7416 #endif
7417 
7418 #if ((defined(TARGET_NR_timerfd_gettime) || \
7419       defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
7420       defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
7421 static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
7422                                                  struct itimerspec *host_its)
7423 {
7424     if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7425                                                        it_interval),
7426                                 &host_its->it_interval) ||
7427         host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
7428                                                        it_value),
7429                                 &host_its->it_value)) {
7430         return -TARGET_EFAULT;
7431     }
7432     return 0;
7433 }
7434 #endif
7435 
7436 #if ((defined(TARGET_NR_timerfd_gettime64) || \
7437       defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
7438       defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
7439 static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
7440                                                    struct itimerspec *host_its)
7441 {
7442     if (host_to_target_timespec64(target_addr +
7443                                   offsetof(struct target__kernel_itimerspec,
7444                                            it_interval),
7445                                   &host_its->it_interval) ||
7446         host_to_target_timespec64(target_addr +
7447                                   offsetof(struct target__kernel_itimerspec,
7448                                            it_value),
7449                                   &host_its->it_value)) {
7450         return -TARGET_EFAULT;
7451     }
7452     return 0;
7453 }
7454 #endif
7455 
7456 #if defined(TARGET_NR_adjtimex) || \
7457     (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Read a guest struct timex at @target_addr into @host_tx, field by
 * field (byte-swapped by __get_user).  Returns 0, or -TARGET_EFAULT if
 * the guest struct is not readable.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7492 
/*
 * Write @host_tx back to a guest struct timex at @target_addr, field by
 * field (byte-swapped by __put_user).  Returns 0, or -TARGET_EFAULT if
 * the guest struct is not writable.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7527 #endif
7528 
7529 
7530 #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Read a guest struct __kernel_timex at @target_addr into @host_tx.
 * The 64-bit time member is converted separately (before locking the
 * struct) via copy_from_user_timeval64; the remaining fields are copied
 * with __get_user.  Returns 0, or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
7569 
/*
 * Write @host_tx back to a guest struct __kernel_timex at @target_addr.
 * The 64-bit time member is converted separately (before locking the
 * struct) via copy_to_user_timeval64; the remaining fields are copied
 * with __put_user.  Returns 0, or -TARGET_EFAULT on bad guest memory.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

   if (copy_to_user_timeval64(target_addr +
                              offsetof(struct target__kernel_timex, time),
                              &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
7608 #endif
7609 
/* Compat shim for libcs whose struct sigevent does not expose the
 * sigev_notify_thread_id member name (it lives in the _sigev_un union). */
#ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
#define sigev_notify_thread_id _sigev_un._tid
#endif

/*
 * Convert a guest struct sigevent at @target_addr into @host_sevp.
 * Returns 0, or -TARGET_EFAULT if the guest struct is not readable.
 * NOTE(review): only the fields read below are written; other members of
 * *host_sevp keep their previous contents — presumably callers zero the
 * struct first; confirm at the call sites.
 */
static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
                                               abi_ulong target_addr)
{
    struct target_sigevent *target_sevp;

    if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    /* This union is awkward on 64 bit systems because it has a 32 bit
     * integer and a pointer in it; we follow the conversion approach
     * used for handling sigval types in signal.c so the guest should get
     * the correct value back even if we did a 64 bit byteswap and it's
     * using the 32 bit integer.
     */
    host_sevp->sigev_value.sival_ptr =
        (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
    host_sevp->sigev_signo =
        target_to_host_signal(tswap32(target_sevp->sigev_signo));
    host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
    host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);

    /* NOTE(review): copy-back flag 1 on a VERIFY_READ lock looks
     * unnecessary (nothing was modified) — confirm. */
    unlock_user_struct(target_sevp, target_addr, 1);
    return 0;
}
7639 
7640 #if defined(TARGET_NR_mlockall)
7641 static inline int target_to_host_mlockall_arg(int arg)
7642 {
7643     int result = 0;
7644 
7645     if (arg & TARGET_MCL_CURRENT) {
7646         result |= MCL_CURRENT;
7647     }
7648     if (arg & TARGET_MCL_FUTURE) {
7649         result |= MCL_FUTURE;
7650     }
7651 #ifdef MCL_ONFAULT
7652     if (arg & TARGET_MCL_ONFAULT) {
7653         result |= MCL_ONFAULT;
7654     }
7655 #endif
7656 
7657     return result;
7658 }
7659 #endif
7660 
7661 static inline int target_to_host_msync_arg(abi_long arg)
7662 {
7663     return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
7664            ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
7665            ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
7666            (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
7667 }
7668 
7669 #if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
7670      defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
7671      defined(TARGET_NR_newfstatat))
/*
 * Write a host struct stat back to the guest's stat64-family buffer at
 * @target_addr.  On 32-bit ARM an OABI guest gets the packed
 * target_eabi_stat64 layout is NOT used; cpu_env->eabi selects between
 * the EABI layout and the generic one.  Returns 0, or -TARGET_EFAULT if
 * the guest buffer is not writable.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unset fields don't leak stale data. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some ABIs carry the inode in a second, differently-sized field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps only when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
7744 #endif
7745 
7746 #if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Write a statx result (@host_stx, already in host byte order with the
 * target's field layout) back to the guest buffer at @target_addr.
 * Returns 0, or -TARGET_EFAULT if the guest buffer is not writable.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr,  0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so reserved/spare fields don't leak stale data. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);

    return 0;
}
7785 #endif
7786 
/*
 * Invoke the raw host futex syscall, selecting between __NR_futex and
 * __NR_futex_time64 at build/run time.  64-bit hosts always have a
 * 64-bit time_t and only __NR_futex; on 32-bit hosts the _time64
 * variant is used when the host libc's timespec tv_sec is 64-bit,
 * otherwise the legacy syscall is used.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);

#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Unreachable: some futex syscall must exist on a Linux host. */
    g_assert_not_reached();
}
7811 
/*
 * Like do_sys_futex(), but via the safe_syscall wrappers (restartable
 * around guest signals) and with the host errno converted to a target
 * errno.  Returns -TARGET_ENOSYS if no suitable host syscall exists.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version  */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
7836 
7837 /* ??? Using host futex calls even when target atomic operations
7838    are not really atomic probably breaks things.  However implementing
7839    futexes locally would make futexes shared between multiple processes
7840    tricky.  However they're probably useless because guest atomic
7841    operations won't work either.  */
7842 #if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Common implementation of the futex and futex_time64 syscalls; TIME64
 * selects which guest timespec layout TIMEOUT uses.  Values the kernel
 * compares against guest memory (VAL for the WAIT ops, VAL3 for
 * CMP_REQUEUE) are byteswapped here; ops whose 4th argument is really
 * VAL2 pass it through the timeout pointer untranslated.  Returns the
 * host result or a negative target errno.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* VAL is compared against *uaddr, which is in guest byte order. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        /* These ops take no timeout; ignore whatever the guest passed. */
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
7908 #endif
7909 
7910 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement name_to_handle_at(): SIZE is the guest's handle_bytes field
 * read from the start of the guest file_handle buffer.  The kernel fills
 * the handle and mount id; the two 32-bit header fields are byteswapped
 * back to guest order, the opaque handle bytes are copied as-is.
 *
 * NOTE(review): total_size = sizeof(struct file_handle) + size can wrap
 * for huge guest sizes; lock_user() should then fail, and the kernel
 * rejects oversized handle_bytes anyway -- confirm.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* Guest-declared handle capacity (handle_bytes), swapped to host. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    /* Host-side scratch handle with the guest's declared capacity. */
    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;

}
7962 #endif
7963 
7964 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Implement open_by_handle_at(): rebuild a host file_handle from the
 * guest buffer -- handle_bytes from SIZE (already swapped to host order
 * by get_user_s32), handle_type swapped here, opaque f_handle bytes
 * copied verbatim -- then open it with host-converted open flags.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Duplicate the guest handle and fix up the two header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);

    unlock_user(target_fh, handle, total_size);

    return ret;
}
7996 #endif
7997 
7998 #if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
7999 
/*
 * Implement signalfd4(): validate FLAGS, convert the guest signal mask
 * and flag bits to host values, create the signalfd and register an fd
 * translator so data read from it is converted for the guest.  Returns
 * the new fd or a negative target errno.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only SFD_NONBLOCK and SFD_CLOEXEC equivalents are permitted. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }

    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need signalfd_siginfo translation. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
8027 #endif
8028 
8029 /* Map host to target signal numbers for the wait family of syscalls.
8030    Assume all other status bits are the same.  */
int host_to_target_waitstatus(int status)
{
    int target_sig;

    if (WIFSIGNALED(status)) {
        /* Terminated by signal: the signal number is in the low 7 bits. */
        target_sig = host_to_target_signal(WTERMSIG(status));
        return target_sig | (status & ~0x7f);
    }

    if (WIFSTOPPED(status)) {
        /* Stopped: the signal number occupies bits 8..15. */
        target_sig = host_to_target_signal(WSTOPSIG(status));
        return (target_sig << 8) | (status & 0xff);
    }

    /* Normal exit (or anything else): no signal number to translate. */
    return status;
}
8042 
8043 static int open_self_cmdline(CPUArchState *cpu_env, int fd)
8044 {
8045     CPUState *cpu = env_cpu(cpu_env);
8046     struct linux_binprm *bprm = ((TaskState *)cpu->opaque)->bprm;
8047     int i;
8048 
8049     for (i = 0; i < bprm->argc; i++) {
8050         size_t len = strlen(bprm->argv[i]) + 1;
8051 
8052         if (write(fd, bprm->argv[i], len) != len) {
8053             return -1;
8054         }
8055     }
8056 
8057     return 0;
8058 }
8059 
/*
 * Emit one synthetic /proc/.../smaps detail block for a mapping of SIZE
 * bytes.  Only Size, KernelPageSize and MMUPageSize are derived from the
 * mapping; the accounting counters are not tracked and read as 0.
 */
static void show_smaps(int fd, unsigned long size)
{
    unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
    unsigned long size_kb = size >> 10;

    dprintf(fd, "Size:                  %lu kB\n"
                "KernelPageSize:        %lu kB\n"
                "MMUPageSize:           %lu kB\n"
                "Rss:                   0 kB\n"
                "Pss:                   0 kB\n"
                "Pss_Dirty:             0 kB\n"
                "Shared_Clean:          0 kB\n"
                "Shared_Dirty:          0 kB\n"
                "Private_Clean:         0 kB\n"
                "Private_Dirty:         0 kB\n"
                "Referenced:            0 kB\n"
                "Anonymous:             0 kB\n"
                "LazyFree:              0 kB\n"
                "AnonHugePages:         0 kB\n"
                "ShmemPmdMapped:        0 kB\n"
                "FilePmdMapped:         0 kB\n"
                "Shared_Hugetlb:        0 kB\n"
                "Private_Hugetlb:       0 kB\n"
                "Swap:                  0 kB\n"
                "SwapPss:               0 kB\n"
                "Locked:                0 kB\n"
                "THPeligible:    0\n", size_kb, page_size_kb, page_size_kb);
}
8088 
/*
 * Fake /proc/self/maps (and, when SMAPS is set, /proc/self/smaps):
 * walk the host's own mappings, keep only those visible in the guest
 * address space, and print them in guest terms.  Always returns 0.
 */
static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    GSList *map_info = read_self_maps();
    GSList *s;
    int count;

    for (s = map_info; s; s = g_slist_next(s)) {
        MapInfo *e = (MapInfo *) s->data;

        if (h2g_valid(e->start)) {
            unsigned long min = e->start;
            unsigned long max = e->end;
            int flags = page_get_flags(h2g(min));
            const char *path;

            /* Clamp the end to the last host address the guest can see. */
            max = h2g_valid(max - 1) ?
                max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;

            /* Skip ranges whose guest page flags are not uniform. */
            if (page_check_range(h2g(min), max - min, flags) == -1) {
                continue;
            }

#ifdef TARGET_HPPA
            /* NOTE(review): hppa compares the high end of the range,
             * presumably because its stack grows upward -- confirm. */
            if (h2g(max) == ts->info->stack_limit) {
#else
            if (h2g(min) == ts->info->stack_limit) {
#endif
                path = "[stack]";
            } else {
                path = e->path;
            }

            count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                            " %c%c%c%c %08" PRIx64 " %s %"PRId64,
                            h2g(min), h2g(max - 1) + 1,
                            (flags & PAGE_READ) ? 'r' : '-',
                            (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                            (flags & PAGE_EXEC) ? 'x' : '-',
                            e->is_priv ? 'p' : 's',
                            (uint64_t) e->offset, e->dev, e->inode);
            if (path) {
                /* Pad so the pathname starts at column 73, like Linux. */
                dprintf(fd, "%*s%s\n", 73 - count, "", path);
            } else {
                dprintf(fd, "\n");
            }
            if (smaps) {
                show_smaps(fd, max - min);
                dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
                        (flags & PAGE_READ) ? " rd" : "",
                        (flags & PAGE_WRITE_ORG) ? " wr" : "",
                        (flags & PAGE_EXEC) ? " ex" : "",
                        e->is_priv ? "" : " sh",
                        (flags & PAGE_READ) ? " mr" : "",
                        (flags & PAGE_WRITE_ORG) ? " mw" : "",
                        (flags & PAGE_EXEC) ? " me" : "",
                        e->is_priv ? "" : " ms");
            }
        }
    }

    free_self_maps(map_info);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * We only support execution from the vsyscall page.
     * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
     */
    count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
                    " --xp 00000000 00:00 0",
                    TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
    dprintf(fd, "%*s%s\n", 73 - count, "",  "[vsyscall]");
    if (smaps) {
        show_smaps(fd, TARGET_PAGE_SIZE);
        dprintf(fd, "VmFlags: ex\n");
    }
#endif

    return 0;
}
8170 
/* Fake /proc/self/maps (plain format, no smaps detail). */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
8175 
/* Fake /proc/self/smaps (maps plus per-mapping detail blocks). */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
8180 
8181 static int open_self_stat(CPUArchState *cpu_env, int fd)
8182 {
8183     CPUState *cpu = env_cpu(cpu_env);
8184     TaskState *ts = cpu->opaque;
8185     g_autoptr(GString) buf = g_string_new(NULL);
8186     int i;
8187 
8188     for (i = 0; i < 44; i++) {
8189         if (i == 0) {
8190             /* pid */
8191             g_string_printf(buf, FMT_pid " ", getpid());
8192         } else if (i == 1) {
8193             /* app name */
8194             gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
8195             bin = bin ? bin + 1 : ts->bprm->argv[0];
8196             g_string_printf(buf, "(%.15s) ", bin);
8197         } else if (i == 2) {
8198             /* task state */
8199             g_string_assign(buf, "R "); /* we are running right now */
8200         } else if (i == 3) {
8201             /* ppid */
8202             g_string_printf(buf, FMT_pid " ", getppid());
8203         } else if (i == 21) {
8204             /* starttime */
8205             g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
8206         } else if (i == 27) {
8207             /* stack bottom */
8208             g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
8209         } else {
8210             /* for the rest, there is MasterCard */
8211             g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
8212         }
8213 
8214         if (write(fd, buf->str, buf->len) != buf->len) {
8215             return -1;
8216         }
8217     }
8218 
8219     return 0;
8220 }
8221 
/*
 * Fake /proc/self/auxv: copy the saved auxiliary vector out of the
 * guest stack into FD, then rewind FD so the caller reads from the
 * start.  Always returns 0 (best effort: lock or write failures are
 * silently ignored).
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = cpu->opaque;
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /* NOTE(review): ptr/len were advanced by the loop, so this
         * unlocks with the remaining (normally zero) length. */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
8251 
/* Return 1 iff FILENAME names the /proc entry ENTRY of the current
 * process, i.e. "/proc/self/ENTRY" or "/proc/<our pid>/ENTRY". */
static int is_proc_myself(const char *filename, const char *entry)
{
    const char *p = filename;

    if (strncmp(p, "/proc/", strlen("/proc/")) != 0) {
        return 0;
    }
    p += strlen("/proc/");

    if (strncmp(p, "self/", strlen("self/")) == 0) {
        p += strlen("self/");
    } else if (*p >= '1' && *p <= '9') {
        /* A numeric pid: it must be exactly ours. */
        char pid_prefix[80];

        snprintf(pid_prefix, sizeof(pid_prefix), "%d/", getpid());
        if (strncmp(p, pid_prefix, strlen(pid_prefix)) != 0) {
            return 0;
        }
        p += strlen(pid_prefix);
    } else {
        return 0;
    }

    return strcmp(p, entry) == 0;
}
8275 
/*
 * Append a crash report to LOGFILE: the formatted message (FMT consumes
 * CODE), the executable path, the CPU state and the guest memory map.
 * No-op when LOGFILE is NULL.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                      const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
8288 
/*
 * Dump an unhandled-exception report to stderr and, when a separate log
 * file is configured, to the QEMU log as well.
 */
void target_exception_dump(CPUArchState *env, const char *fmt, int code)
{
    /* dump to console */
    excp_dump_file(stderr, env, fmt, code);

    /* dump to log file */
    if (qemu_log_separate()) {
        /* May be NULL; excp_dump_file tolerates a NULL stream. */
        FILE *logfile = qemu_log_trylock();

        excp_dump_file(logfile, env, fmt, code);
        qemu_log_unlock(logfile);
    }
}
8302 
8303 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
8304     defined(TARGET_SPARC) || defined(TARGET_M68K) || defined(TARGET_HPPA) || \
8305     defined(TARGET_RISCV) || defined(TARGET_S390X)
/* Exact-match comparator used for fixed /proc paths in the fake-open
 * table; returns 1 on match, 0 otherwise. */
static int is_proc(const char *filename, const char *entry)
{
    return !strcmp(filename, entry);
}
8310 #endif
8311 
8312 #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
8313 static int open_net_route(CPUArchState *cpu_env, int fd)
8314 {
8315     FILE *fp;
8316     char *line = NULL;
8317     size_t len = 0;
8318     ssize_t read;
8319 
8320     fp = fopen("/proc/net/route", "r");
8321     if (fp == NULL) {
8322         return -1;
8323     }
8324 
8325     /* read header */
8326 
8327     read = getline(&line, &len, fp);
8328     dprintf(fd, "%s", line);
8329 
8330     /* read routes */
8331 
8332     while ((read = getline(&line, &len, fp)) != -1) {
8333         char iface[16];
8334         uint32_t dest, gw, mask;
8335         unsigned int flags, refcnt, use, metric, mtu, window, irtt;
8336         int fields;
8337 
8338         fields = sscanf(line,
8339                         "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8340                         iface, &dest, &gw, &flags, &refcnt, &use, &metric,
8341                         &mask, &mtu, &window, &irtt);
8342         if (fields != 11) {
8343             continue;
8344         }
8345         dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
8346                 iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
8347                 metric, tswap32(mask), mtu, window, irtt);
8348     }
8349 
8350     free(line);
8351     fclose(fp);
8352 
8353     return 0;
8354 }
8355 #endif
8356 
8357 #if defined(TARGET_SPARC)
/* Minimal fake /proc/cpuinfo for sparc guests: report a sun4u machine. */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "type\t\t: sun4u\n");
    return 0;
}
8363 #endif
8364 
8365 #if defined(TARGET_HPPA)
8366 static int open_cpuinfo(CPUArchState *cpu_env, int fd)
8367 {
8368     int i, num_cpus;
8369 
8370     num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
8371     for (i = 0; i < num_cpus; i++) {
8372         dprintf(fd, "processor\t: %d\n", i);
8373         dprintf(fd, "cpu family\t: PA-RISC 1.1e\n");
8374         dprintf(fd, "cpu\t\t: PA7300LC (PCX-L2)\n");
8375         dprintf(fd, "capabilities\t: os32\n");
8376         dprintf(fd, "model\t\t: 9000/778/B160L - "
8377                     "Merlin L2 160 QEMU (9000/778/B160L)\n\n");
8378     }
8379     return 0;
8380 }
8381 #endif
8382 
8383 #if defined(TARGET_RISCV)
/*
 * Fake /proc/cpuinfo for riscv guests: one record per online host CPU,
 * all reporting the emulated cpu's ISA string and MMU type.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int i;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    RISCVCPU *cpu = env_archcpu(cpu_env);
    const RISCVCPUConfig *cfg = riscv_cpu_cfg((CPURISCVState *) cpu_env);
    char *isa_string = riscv_isa_string(cpu);
    const char *mmu;

    /* MMU type: sv32 on RV32, sv48 otherwise; "none" when no MMU. */
    if (cfg->mmu) {
        mmu = (cpu_env->xl == MXL_RV32) ? "sv32"  : "sv48";
    } else {
        mmu = "none";
    }

    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor\t: %d\n", i);
        dprintf(fd, "hart\t\t: %d\n", i);
        dprintf(fd, "isa\t\t: %s\n", isa_string);
        dprintf(fd, "mmu\t\t: %s\n", mmu);
        dprintf(fd, "uarch\t\t: qemu\n\n");
    }

    /* riscv_isa_string() result is heap-allocated (freed here). */
    g_free(isa_string);
    return 0;
}
8410 #endif
8411 
8412 #if defined(TARGET_S390X)
8413 /*
8414  * Emulate what a Linux kernel running in qemu-system-s390x -M accel=tcg would
8415  * show in /proc/cpuinfo.
8416  *
8417  * Skip the following in order to match the missing support in op_ecag():
8418  * - show_cacheinfo().
8419  * - show_cpu_topology().
8420  * - show_cpu_mhz().
8421  *
8422  * Use fixed values for certain fields:
8423  * - bogomips per cpu - from a qemu-system-s390x run.
8424  * - max thread id = 0, since SMT / SIGP_SET_MULTI_THREADING is not supported.
8425  *
8426  * Keep the code structure close to arch/s390/kernel/processor.c.
8427  */
8428 
/*
 * Print the s390x "facilities" line: the number of every facility bit
 * set in the first 2048 bytes of the STFL facility block.
 */
static void show_facilities(int fd)
{
    size_t sizeof_stfl_bytes = 2048;
    g_autofree uint8_t *stfl_bytes = g_new0(uint8_t, sizeof_stfl_bytes);
    unsigned int bit;

    dprintf(fd, "facilities      :");
    s390_get_feat_block(S390_FEAT_TYPE_STFL, stfl_bytes);
    for (bit = 0; bit < sizeof_stfl_bytes * 8; bit++) {
        if (test_be_bit(bit, stfl_bytes)) {
            dprintf(fd, " %d", bit);
        }
    }
    dprintf(fd, "\n");
}
8444 
/*
 * Build the "identification" value for cpu N: deposit the CPU address
 * into the physical-address bits of an otherwise-zero CPU ID.
 */
static int cpu_ident(unsigned long n)
{
    return deposit32(0, CPU_ID_BITS - CPU_PHYS_ADDR_BITS, CPU_PHYS_ADDR_BITS,
                     n);
}
8450 
/*
 * Print the summary header of the s390x /proc/cpuinfo fake: vendor,
 * processor count, hwcap feature names, facility bits and one short
 * line per online host CPU.
 */
static void show_cpu_summary(CPUArchState *cpu_env, int fd)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    uint32_t elf_hwcap = get_elf_hwcap();
    const char *hwcap_str;
    int i;

    /* bogomips and max thread id are fixed values; see comment above. */
    dprintf(fd, "vendor_id       : IBM/S390\n"
                "# processors    : %i\n"
                "bogomips per cpu: 13370.00\n",
            num_cpus);
    dprintf(fd, "max thread id   : 0\n");
    dprintf(fd, "features\t: ");
    for (i = 0; i < sizeof(elf_hwcap) * 8; i++) {
        if (!(elf_hwcap & (1 << i))) {
            continue;
        }
        /* Bits without a printable name are skipped. */
        hwcap_str = elf_hwcap_str(i);
        if (hwcap_str) {
            dprintf(fd, "%s ", hwcap_str);
        }
    }
    dprintf(fd, "\n");
    show_facilities(fd);
    for (i = 0; i < num_cpus; i++) {
        dprintf(fd, "processor %d: "
               "version = %02X,  "
               "identification = %06X,  "
               "machine = %04X\n",
               i, model->cpu_ver, cpu_ident(i), model->def->type);
    }
}
8484 
/* Print the version/identification/machine lines for cpu N. */
static void show_cpu_ids(CPUArchState *cpu_env, int fd, unsigned long n)
{
    S390CPUModel *model = env_archcpu(cpu_env)->model;

    dprintf(fd, "version         : %02X\n", model->cpu_ver);
    dprintf(fd, "identification  : %06X\n", cpu_ident(n));
    dprintf(fd, "machine         : %04X\n", model->def->type);
}
8493 
/* Print the full per-cpu /proc/cpuinfo record for cpu N. */
static void show_cpuinfo(CPUArchState *cpu_env, int fd, unsigned long n)
{
    dprintf(fd, "\ncpu number      : %ld\n", n);
    show_cpu_ids(cpu_env, fd, n);
}
8499 
/*
 * Fake /proc/cpuinfo for s390x guests: the summary header followed by
 * one record per online host CPU.
 */
static int open_cpuinfo(CPUArchState *cpu_env, int fd)
{
    int num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
    int i;

    show_cpu_summary(cpu_env, fd);
    for (i = 0; i < num_cpus; i++) {
        show_cpuinfo(cpu_env, fd, i);
    }
    return 0;
}
8511 #endif
8512 
8513 #if defined(TARGET_M68K)
/* Minimal fake /proc/hardware for m68k guests. */
static int open_hardware(CPUArchState *cpu_env, int fd)
{
    dprintf(fd, "Model:\t\tqemu-m68k\n");
    return 0;
}
8519 #endif
8520 
/*
 * Guest-side openat(): intercept the /proc entries QEMU must fake
 * (because the host values would describe QEMU itself, not the guest),
 * redirect /proc/<self>/exe to the tracked executable, and pass
 * everything else through to the host.  SAFE selects the safe_syscall
 * wrapper.  Returns an fd, or -1 with errno set.
 */
int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
                    int flags, mode_t mode, bool safe)
{
    struct fake_open {
        const char *filename;
        int (*fill)(CPUArchState *cpu_env, int fd);
        int (*cmp)(const char *s1, const char *s2);
    };
    const struct fake_open *fake_open;
    /* Table of faked files; terminated by the NULL entry. */
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(TARGET_SPARC) || defined(TARGET_HPPA) || \
    defined(TARGET_RISCV) || defined(TARGET_S390X)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(TARGET_M68K)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* /proc/<self>/exe must name the emulated binary, not QEMU. */
    if (is_proc_myself(pathname, "exe")) {
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* Back the faked content with a memfd, or an unlinked temp file
         * on hosts without memfd_create (ENOSYS). */
        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        /* Generate the content; preserve the fill callback's errno. */
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        /* Rewind so the guest reads from the beginning. */
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    if (safe) {
        return safe_openat(dirfd, path(pathname), flags, mode);
    } else {
        return openat(dirfd, path(pathname), flags, mode);
    }
}
8602 
8603 ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
8604 {
8605     ssize_t ret;
8606 
8607     if (!pathname || !buf) {
8608         errno = EFAULT;
8609         return -1;
8610     }
8611 
8612     if (!bufsiz) {
8613         /* Short circuit this for the magic exe check. */
8614         errno = EINVAL;
8615         return -1;
8616     }
8617 
8618     if (is_proc_myself((const char *)pathname, "exe")) {
8619         /*
8620          * Don't worry about sign mismatch as earlier mapping
8621          * logic would have thrown a bad address error.
8622          */
8623         ret = MIN(strlen(exec_path), bufsiz);
8624         /* We cannot NUL terminate the string. */
8625         memcpy(buf, exec_path, ret);
8626     } else {
8627         ret = readlink(path(pathname), buf, bufsiz);
8628     }
8629 
8630     return ret;
8631 }
8632 
/*
 * Common implementation of execve and execveat.  Copies the guest
 * argv/envp pointer arrays and their strings into host memory,
 * redirects /proc/<self>/exe to the tracked executable, and performs
 * the exec through the safe_syscall wrapper (see comment below).
 * Returns a negative target errno on failure; on success the call does
 * not return.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    argc = 0;

    /* First pass: count argv entries up to the NULL terminator. */
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    envc = 0;
    /* Likewise for envp. */
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    /* Second pass: lock each guest string into the host arrays. */
    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* Exec the tracked binary when the guest names its own /proc exe. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /* Unlock every guest string that was successfully locked above. */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
8748 
8749 #define TIMER_MAGIC 0x0caf0000
8750 #define TIMER_MAGIC_MASK 0xffff0000
8751 
8752 /* Convert QEMU provided timer ID back to internal 16bit index format */
8753 static target_timer_t get_timer_id(abi_long arg)
8754 {
8755     target_timer_t timerid = arg;
8756 
8757     if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
8758         return -TARGET_EINVAL;
8759     }
8760 
8761     timerid &= 0xffff;
8762 
8763     if (timerid >= ARRAY_SIZE(g_posix_timers)) {
8764         return -TARGET_EINVAL;
8765     }
8766 
8767     return timerid;
8768 }
8769 
8770 static int target_to_host_cpu_mask(unsigned long *host_mask,
8771                                    size_t host_size,
8772                                    abi_ulong target_addr,
8773                                    size_t target_size)
8774 {
8775     unsigned target_bits = sizeof(abi_ulong) * 8;
8776     unsigned host_bits = sizeof(*host_mask) * 8;
8777     abi_ulong *target_mask;
8778     unsigned i, j;
8779 
8780     assert(host_size >= target_size);
8781 
8782     target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
8783     if (!target_mask) {
8784         return -TARGET_EFAULT;
8785     }
8786     memset(host_mask, 0, host_size);
8787 
8788     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8789         unsigned bit = i * target_bits;
8790         abi_ulong val;
8791 
8792         __get_user(val, &target_mask[i]);
8793         for (j = 0; j < target_bits; j++, bit++) {
8794             if (val & (1UL << j)) {
8795                 host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
8796             }
8797         }
8798     }
8799 
8800     unlock_user(target_mask, target_addr, 0);
8801     return 0;
8802 }
8803 
8804 static int host_to_target_cpu_mask(const unsigned long *host_mask,
8805                                    size_t host_size,
8806                                    abi_ulong target_addr,
8807                                    size_t target_size)
8808 {
8809     unsigned target_bits = sizeof(abi_ulong) * 8;
8810     unsigned host_bits = sizeof(*host_mask) * 8;
8811     abi_ulong *target_mask;
8812     unsigned i, j;
8813 
8814     assert(host_size >= target_size);
8815 
8816     target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
8817     if (!target_mask) {
8818         return -TARGET_EFAULT;
8819     }
8820 
8821     for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
8822         unsigned bit = i * target_bits;
8823         abi_ulong val = 0;
8824 
8825         for (j = 0; j < target_bits; j++, bit++) {
8826             if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
8827                 val |= 1UL << j;
8828             }
8829         }
8830         __put_user(val, &target_mask[i]);
8831     }
8832 
8833     unlock_user(target_mask, target_addr, target_size);
8834     return 0;
8835 }
8836 
8837 #ifdef TARGET_NR_getdents
/*
 * Implement getdents: fill the guest buffer at arg2 (count bytes) with
 * directory entries from dirfd, repacking from the host dirent layout
 * into the target's struct target_dirent.  Returns the number of bytes
 * stored in the guest buffer, or a -TARGET_* error.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    /* Host bounce buffer, same size as the guest's buffer. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Repack one host record at a time into the guest buffer. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        /* +2: the name's terminating NUL plus the trailing type byte. */
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8922 #endif /* TARGET_NR_getdents */
8923 
8924 #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Implement getdents64: as do_getdents, but target records are struct
 * target_dirent64 with an explicit d_type field.  Returns the number
 * of bytes stored in the guest buffer at arg2, or a -TARGET_* error.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off64_t prev_diroff = 0;

    /* Host bounce buffer, same size as the guest's buffer. */
    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    /* Repack one host record at a time into the guest buffer. */
    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* namelen includes the terminating NUL here. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek64(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
8988 #endif /* TARGET_NR_getdents64 */
8989 
8990 #if defined(TARGET_NR_riscv_hwprobe)
8991 
/*
 * Keys, value bits and pair layout for the riscv_hwprobe syscall;
 * these mirror the Linux kernel's RISC-V hwprobe ABI.
 */
#define RISCV_HWPROBE_KEY_MVENDORID     0
#define RISCV_HWPROBE_KEY_MARCHID       1
#define RISCV_HWPROBE_KEY_MIMPID        2

#define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
#define     RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)

#define RISCV_HWPROBE_KEY_IMA_EXT_0     4
#define     RISCV_HWPROBE_IMA_FD       (1 << 0)
#define     RISCV_HWPROBE_IMA_C        (1 << 1)

#define RISCV_HWPROBE_KEY_CPUPERF_0     5
#define     RISCV_HWPROBE_MISALIGNED_UNKNOWN     (0 << 0)
#define     RISCV_HWPROBE_MISALIGNED_EMULATED    (1 << 0)
#define     RISCV_HWPROBE_MISALIGNED_SLOW        (2 << 0)
#define     RISCV_HWPROBE_MISALIGNED_FAST        (3 << 0)
#define     RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
#define     RISCV_HWPROBE_MISALIGNED_MASK        (7 << 0)

/* One key/value query pair, stored in guest memory (guest byte order). */
struct riscv_hwprobe {
    abi_llong  key;   /* in: probe key; set to -1 if unrecognized */
    abi_ullong value; /* out: probed value for this key */
};
9015 
9016 static void risc_hwprobe_fill_pairs(CPURISCVState *env,
9017                                     struct riscv_hwprobe *pair,
9018                                     size_t pair_count)
9019 {
9020     const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);
9021 
9022     for (; pair_count > 0; pair_count--, pair++) {
9023         abi_llong key;
9024         abi_ullong value;
9025         __put_user(0, &pair->value);
9026         __get_user(key, &pair->key);
9027         switch (key) {
9028         case RISCV_HWPROBE_KEY_MVENDORID:
9029             __put_user(cfg->mvendorid, &pair->value);
9030             break;
9031         case RISCV_HWPROBE_KEY_MARCHID:
9032             __put_user(cfg->marchid, &pair->value);
9033             break;
9034         case RISCV_HWPROBE_KEY_MIMPID:
9035             __put_user(cfg->mimpid, &pair->value);
9036             break;
9037         case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
9038             value = riscv_has_ext(env, RVI) &&
9039                     riscv_has_ext(env, RVM) &&
9040                     riscv_has_ext(env, RVA) ?
9041                     RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
9042             __put_user(value, &pair->value);
9043             break;
9044         case RISCV_HWPROBE_KEY_IMA_EXT_0:
9045             value = riscv_has_ext(env, RVF) &&
9046                     riscv_has_ext(env, RVD) ?
9047                     RISCV_HWPROBE_IMA_FD : 0;
9048             value |= riscv_has_ext(env, RVC) ?
9049                      RISCV_HWPROBE_IMA_C : pair->value;
9050             __put_user(value, &pair->value);
9051             break;
9052         case RISCV_HWPROBE_KEY_CPUPERF_0:
9053             __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
9054             break;
9055         default:
9056             __put_user(-1, &pair->key);
9057             break;
9058         }
9059     }
9060 }
9061 
9062 static int cpu_set_valid(abi_long arg3, abi_long arg4)
9063 {
9064     int ret, i, tmp;
9065     size_t host_mask_size, target_mask_size;
9066     unsigned long *host_mask;
9067 
9068     /*
9069      * cpu_set_t represent CPU masks as bit masks of type unsigned long *.
9070      * arg3 contains the cpu count.
9071      */
9072     tmp = (8 * sizeof(abi_ulong));
9073     target_mask_size = ((arg3 + tmp - 1) / tmp) * sizeof(abi_ulong);
9074     host_mask_size = (target_mask_size + (sizeof(*host_mask) - 1)) &
9075                      ~(sizeof(*host_mask) - 1);
9076 
9077     host_mask = alloca(host_mask_size);
9078 
9079     ret = target_to_host_cpu_mask(host_mask, host_mask_size,
9080                                   arg4, target_mask_size);
9081     if (ret != 0) {
9082         return ret;
9083     }
9084 
9085     for (i = 0 ; i < host_mask_size / sizeof(*host_mask); i++) {
9086         if (host_mask[i] != 0) {
9087             return 0;
9088         }
9089     }
9090     return -TARGET_EINVAL;
9091 }
9092 
9093 static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
9094                                  abi_long arg2, abi_long arg3,
9095                                  abi_long arg4, abi_long arg5)
9096 {
9097     int ret;
9098     struct riscv_hwprobe *host_pairs;
9099 
9100     /* flags must be 0 */
9101     if (arg5 != 0) {
9102         return -TARGET_EINVAL;
9103     }
9104 
9105     /* check cpu_set */
9106     if (arg3 != 0) {
9107         ret = cpu_set_valid(arg3, arg4);
9108         if (ret != 0) {
9109             return ret;
9110         }
9111     } else if (arg4 != 0) {
9112         return -TARGET_EINVAL;
9113     }
9114 
9115     /* no pairs */
9116     if (arg2 == 0) {
9117         return 0;
9118     }
9119 
9120     host_pairs = lock_user(VERIFY_WRITE, arg1,
9121                            sizeof(*host_pairs) * (size_t)arg2, 0);
9122     if (host_pairs == NULL) {
9123         return -TARGET_EFAULT;
9124     }
9125     risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
9126     unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
9127     return 0;
9128 }
9129 #endif /* TARGET_NR_riscv_hwprobe */
9130 
/* Direct host syscall wrappers, generated via the _syscall* macros. */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif

#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif

#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
           int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
9146 
9147 /* This is an internal helper for do_syscall so that it is easier
9148  * to have a single return point, so that actions, such as logging
9149  * of syscall results, can be performed.
9150  * All errnos that do_syscall() returns must be -TARGET_<errcode>.
9151  */
9152 static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
9153                             abi_long arg2, abi_long arg3, abi_long arg4,
9154                             abi_long arg5, abi_long arg6, abi_long arg7,
9155                             abi_long arg8)
9156 {
9157     CPUState *cpu = env_cpu(cpu_env);
9158     abi_long ret;
9159 #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
9160     || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
9161     || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
9162     || defined(TARGET_NR_statx)
9163     struct stat st;
9164 #endif
9165 #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
9166     || defined(TARGET_NR_fstatfs)
9167     struct statfs stfs;
9168 #endif
9169     void *p;
9170 
9171     switch(num) {
9172     case TARGET_NR_exit:
9173         /* In old applications this may be used to implement _exit(2).
9174            However in threaded applications it is used for thread termination,
9175            and _exit_group is used for application termination.
9176            Do thread termination if we have more then one thread.  */
9177 
9178         if (block_signals()) {
9179             return -QEMU_ERESTARTSYS;
9180         }
9181 
9182         pthread_mutex_lock(&clone_lock);
9183 
9184         if (CPU_NEXT(first_cpu)) {
9185             TaskState *ts = cpu->opaque;
9186 
9187             if (ts->child_tidptr) {
9188                 put_user_u32(0, ts->child_tidptr);
9189                 do_sys_futex(g2h(cpu, ts->child_tidptr),
9190                              FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
9191             }
9192 
9193             object_unparent(OBJECT(cpu));
9194             object_unref(OBJECT(cpu));
9195             /*
9196              * At this point the CPU should be unrealized and removed
9197              * from cpu lists. We can clean-up the rest of the thread
9198              * data without the lock held.
9199              */
9200 
9201             pthread_mutex_unlock(&clone_lock);
9202 
9203             thread_cpu = NULL;
9204             g_free(ts);
9205             rcu_unregister_thread();
9206             pthread_exit(NULL);
9207         }
9208 
9209         pthread_mutex_unlock(&clone_lock);
9210         preexit_cleanup(cpu_env, arg1);
9211         _exit(arg1);
9212         return 0; /* avoid warning */
9213     case TARGET_NR_read:
9214         if (arg2 == 0 && arg3 == 0) {
9215             return get_errno(safe_read(arg1, 0, 0));
9216         } else {
9217             if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
9218                 return -TARGET_EFAULT;
9219             ret = get_errno(safe_read(arg1, p, arg3));
9220             if (ret >= 0 &&
9221                 fd_trans_host_to_target_data(arg1)) {
9222                 ret = fd_trans_host_to_target_data(arg1)(p, ret);
9223             }
9224             unlock_user(p, arg2, ret);
9225         }
9226         return ret;
9227     case TARGET_NR_write:
9228         if (arg2 == 0 && arg3 == 0) {
9229             return get_errno(safe_write(arg1, 0, 0));
9230         }
9231         if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
9232             return -TARGET_EFAULT;
9233         if (fd_trans_target_to_host_data(arg1)) {
9234             void *copy = g_malloc(arg3);
9235             memcpy(copy, p, arg3);
9236             ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
9237             if (ret >= 0) {
9238                 ret = get_errno(safe_write(arg1, copy, ret));
9239             }
9240             g_free(copy);
9241         } else {
9242             ret = get_errno(safe_write(arg1, p, arg3));
9243         }
9244         unlock_user(p, arg2, 0);
9245         return ret;
9246 
9247 #ifdef TARGET_NR_open
9248     case TARGET_NR_open:
9249         if (!(p = lock_user_string(arg1)))
9250             return -TARGET_EFAULT;
9251         ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
9252                                   target_to_host_bitmask(arg2, fcntl_flags_tbl),
9253                                   arg3, true));
9254         fd_trans_unregister(ret);
9255         unlock_user(p, arg1, 0);
9256         return ret;
9257 #endif
9258     case TARGET_NR_openat:
9259         if (!(p = lock_user_string(arg2)))
9260             return -TARGET_EFAULT;
9261         ret = get_errno(do_guest_openat(cpu_env, arg1, p,
9262                                   target_to_host_bitmask(arg3, fcntl_flags_tbl),
9263                                   arg4, true));
9264         fd_trans_unregister(ret);
9265         unlock_user(p, arg2, 0);
9266         return ret;
9267 #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9268     case TARGET_NR_name_to_handle_at:
9269         ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
9270         return ret;
9271 #endif
9272 #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
9273     case TARGET_NR_open_by_handle_at:
9274         ret = do_open_by_handle_at(arg1, arg2, arg3);
9275         fd_trans_unregister(ret);
9276         return ret;
9277 #endif
9278 #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
9279     case TARGET_NR_pidfd_open:
9280         return get_errno(pidfd_open(arg1, arg2));
9281 #endif
9282 #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
9283     case TARGET_NR_pidfd_send_signal:
9284         {
9285             siginfo_t uinfo, *puinfo;
9286 
9287             if (arg3) {
9288                 p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
9289                 if (!p) {
9290                     return -TARGET_EFAULT;
9291                  }
9292                  target_to_host_siginfo(&uinfo, p);
9293                  unlock_user(p, arg3, 0);
9294                  puinfo = &uinfo;
9295             } else {
9296                  puinfo = NULL;
9297             }
9298             ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
9299                                               puinfo, arg4));
9300         }
9301         return ret;
9302 #endif
9303 #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
9304     case TARGET_NR_pidfd_getfd:
9305         return get_errno(pidfd_getfd(arg1, arg2, arg3));
9306 #endif
9307     case TARGET_NR_close:
9308         fd_trans_unregister(arg1);
9309         return get_errno(close(arg1));
9310 #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
9311     case TARGET_NR_close_range:
9312         ret = get_errno(sys_close_range(arg1, arg2, arg3));
9313         if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
9314             abi_long fd, maxfd;
9315             maxfd = MIN(arg2, target_fd_max);
9316             for (fd = arg1; fd < maxfd; fd++) {
9317                 fd_trans_unregister(fd);
9318             }
9319         }
9320         return ret;
9321 #endif
9322 
9323     case TARGET_NR_brk:
9324         return do_brk(arg1);
9325 #ifdef TARGET_NR_fork
9326     case TARGET_NR_fork:
9327         return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
9328 #endif
9329 #ifdef TARGET_NR_waitpid
9330     case TARGET_NR_waitpid:
9331         {
9332             int status;
9333             ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
9334             if (!is_error(ret) && arg2 && ret
9335                 && put_user_s32(host_to_target_waitstatus(status), arg2))
9336                 return -TARGET_EFAULT;
9337         }
9338         return ret;
9339 #endif
9340 #ifdef TARGET_NR_waitid
9341     case TARGET_NR_waitid:
9342         {
9343             siginfo_t info;
9344             info.si_pid = 0;
9345             ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL));
9346             if (!is_error(ret) && arg3 && info.si_pid != 0) {
9347                 if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
9348                     return -TARGET_EFAULT;
9349                 host_to_target_siginfo(p, &info);
9350                 unlock_user(p, arg3, sizeof(target_siginfo_t));
9351             }
9352         }
9353         return ret;
9354 #endif
9355 #ifdef TARGET_NR_creat /* not on alpha */
9356     case TARGET_NR_creat:
9357         if (!(p = lock_user_string(arg1)))
9358             return -TARGET_EFAULT;
9359         ret = get_errno(creat(p, arg2));
9360         fd_trans_unregister(ret);
9361         unlock_user(p, arg1, 0);
9362         return ret;
9363 #endif
9364 #ifdef TARGET_NR_link
9365     case TARGET_NR_link:
9366         {
9367             void * p2;
9368             p = lock_user_string(arg1);
9369             p2 = lock_user_string(arg2);
9370             if (!p || !p2)
9371                 ret = -TARGET_EFAULT;
9372             else
9373                 ret = get_errno(link(p, p2));
9374             unlock_user(p2, arg2, 0);
9375             unlock_user(p, arg1, 0);
9376         }
9377         return ret;
9378 #endif
9379 #if defined(TARGET_NR_linkat)
9380     case TARGET_NR_linkat:
9381         {
9382             void * p2 = NULL;
9383             if (!arg2 || !arg4)
9384                 return -TARGET_EFAULT;
9385             p  = lock_user_string(arg2);
9386             p2 = lock_user_string(arg4);
9387             if (!p || !p2)
9388                 ret = -TARGET_EFAULT;
9389             else
9390                 ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
9391             unlock_user(p, arg2, 0);
9392             unlock_user(p2, arg4, 0);
9393         }
9394         return ret;
9395 #endif
9396 #ifdef TARGET_NR_unlink
9397     case TARGET_NR_unlink:
9398         if (!(p = lock_user_string(arg1)))
9399             return -TARGET_EFAULT;
9400         ret = get_errno(unlink(p));
9401         unlock_user(p, arg1, 0);
9402         return ret;
9403 #endif
9404 #if defined(TARGET_NR_unlinkat)
9405     case TARGET_NR_unlinkat:
9406         if (!(p = lock_user_string(arg2)))
9407             return -TARGET_EFAULT;
9408         ret = get_errno(unlinkat(arg1, p, arg3));
9409         unlock_user(p, arg2, 0);
9410         return ret;
9411 #endif
9412     case TARGET_NR_execveat:
9413         return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
9414     case TARGET_NR_execve:
9415         return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
9416     case TARGET_NR_chdir:
9417         if (!(p = lock_user_string(arg1)))
9418             return -TARGET_EFAULT;
9419         ret = get_errno(chdir(p));
9420         unlock_user(p, arg1, 0);
9421         return ret;
9422 #ifdef TARGET_NR_time
9423     case TARGET_NR_time:
9424         {
9425             time_t host_time;
9426             ret = get_errno(time(&host_time));
9427             if (!is_error(ret)
9428                 && arg1
9429                 && put_user_sal(host_time, arg1))
9430                 return -TARGET_EFAULT;
9431         }
9432         return ret;
9433 #endif
9434 #ifdef TARGET_NR_mknod
9435     case TARGET_NR_mknod:
9436         if (!(p = lock_user_string(arg1)))
9437             return -TARGET_EFAULT;
9438         ret = get_errno(mknod(p, arg2, arg3));
9439         unlock_user(p, arg1, 0);
9440         return ret;
9441 #endif
9442 #if defined(TARGET_NR_mknodat)
9443     case TARGET_NR_mknodat:
9444         if (!(p = lock_user_string(arg2)))
9445             return -TARGET_EFAULT;
9446         ret = get_errno(mknodat(arg1, p, arg3, arg4));
9447         unlock_user(p, arg2, 0);
9448         return ret;
9449 #endif
9450 #ifdef TARGET_NR_chmod
9451     case TARGET_NR_chmod:
9452         if (!(p = lock_user_string(arg1)))
9453             return -TARGET_EFAULT;
9454         ret = get_errno(chmod(p, arg2));
9455         unlock_user(p, arg1, 0);
9456         return ret;
9457 #endif
9458 #ifdef TARGET_NR_lseek
9459     case TARGET_NR_lseek:
9460         return get_errno(lseek(arg1, arg2, arg3));
9461 #endif
9462 #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
9463     /* Alpha specific */
9464     case TARGET_NR_getxpid:
9465         cpu_env->ir[IR_A4] = getppid();
9466         return get_errno(getpid());
9467 #endif
9468 #ifdef TARGET_NR_getpid
9469     case TARGET_NR_getpid:
9470         return get_errno(getpid());
9471 #endif
9472     case TARGET_NR_mount:
9473         {
9474             /* need to look at the data field */
9475             void *p2, *p3;
9476 
9477             if (arg1) {
9478                 p = lock_user_string(arg1);
9479                 if (!p) {
9480                     return -TARGET_EFAULT;
9481                 }
9482             } else {
9483                 p = NULL;
9484             }
9485 
9486             p2 = lock_user_string(arg2);
9487             if (!p2) {
9488                 if (arg1) {
9489                     unlock_user(p, arg1, 0);
9490                 }
9491                 return -TARGET_EFAULT;
9492             }
9493 
9494             if (arg3) {
9495                 p3 = lock_user_string(arg3);
9496                 if (!p3) {
9497                     if (arg1) {
9498                         unlock_user(p, arg1, 0);
9499                     }
9500                     unlock_user(p2, arg2, 0);
9501                     return -TARGET_EFAULT;
9502                 }
9503             } else {
9504                 p3 = NULL;
9505             }
9506 
9507             /* FIXME - arg5 should be locked, but it isn't clear how to
9508              * do that since it's not guaranteed to be a NULL-terminated
9509              * string.
9510              */
9511             if (!arg5) {
9512                 ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
9513             } else {
9514                 ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
9515             }
9516             ret = get_errno(ret);
9517 
9518             if (arg1) {
9519                 unlock_user(p, arg1, 0);
9520             }
9521             unlock_user(p2, arg2, 0);
9522             if (arg3) {
9523                 unlock_user(p3, arg3, 0);
9524             }
9525         }
9526         return ret;
#if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
#if defined(TARGET_NR_umount)
    case TARGET_NR_umount:
#endif
#if defined(TARGET_NR_oldumount)
    case TARGET_NR_oldumount:
#endif
        /* Unmount the filesystem named by the guest path string at arg1. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(umount(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
    case TARGET_NR_move_mount:
        {
            void *p2, *p4;

            /* Both the from-path (arg2) and to-path (arg4) must be present. */
            if (!arg2 || !arg4) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            p4 = lock_user_string(arg4);
            if (!p4) {
                /* Release the first lock before bailing out. */
                unlock_user(p2, arg2, 0);
                return -TARGET_EFAULT;
            }
            /* arg1/arg3 are the from/to dirfds, arg5 the flags word. */
            ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));

            unlock_user(p2, arg2, 0);
            unlock_user(p4, arg4, 0);

            return ret;
        }
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
    case TARGET_NR_open_tree:
        {
            void *p2;
            int host_flags;

            if (!arg2) {
                return -TARGET_EFAULT;
            }

            p2 = lock_user_string(arg2);
            if (!p2) {
                return -TARGET_EFAULT;
            }

            /*
             * Translate only O_CLOEXEC from target to host encoding; the
             * remaining OPEN_TREE_* flag bits are passed through unchanged.
             */
            host_flags = arg3 & ~TARGET_O_CLOEXEC;
            if (arg3 & TARGET_O_CLOEXEC) {
                host_flags |= O_CLOEXEC;
            }

            ret = get_errno(sys_open_tree(arg1, p2, host_flags));

            unlock_user(p2, arg2, 0);

            return ret;
        }
#endif
#ifdef TARGET_NR_stime /* not on alpha */
    case TARGET_NR_stime:
        {
            struct timespec ts;
            ts.tv_nsec = 0;
            /* Read the guest's time_t (signed abi_long) seconds value. */
            if (get_user_sal(ts.tv_sec, arg1)) {
                return -TARGET_EFAULT;
            }
            /* stime() is implemented in terms of clock_settime(). */
            return get_errno(clock_settime(CLOCK_REALTIME, &ts));
        }
#endif
#ifdef TARGET_NR_alarm /* not on alpha */
    case TARGET_NR_alarm:
        /* alarm() cannot fail; the return is the previous timer residue. */
        return alarm(arg1);
#endif
#ifdef TARGET_NR_pause /* not on alpha */
    case TARGET_NR_pause:
        /*
         * Suspend with the task's current signal mask unless a signal is
         * already pending (block_signals() returning nonzero); either way
         * pause() reports -EINTR to the guest.
         */
        if (!block_signals()) {
            sigsuspend(&((TaskState *)cpu->opaque)->signal_mask);
        }
        return -TARGET_EINTR;
#endif
#ifdef TARGET_NR_utime
    case TARGET_NR_utime:
        {
            struct utimbuf tbuf, *host_tbuf;
            struct target_utimbuf *target_tbuf;
            /* arg2 == NULL means "set both times to now". */
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
                    return -TARGET_EFAULT;
                /* Byte-swap the guest's access/modification times. */
                tbuf.actime = tswapal(target_tbuf->actime);
                tbuf.modtime = tswapal(target_tbuf->modtime);
                unlock_user_struct(target_tbuf, arg2, 0);
                host_tbuf = &tbuf;
            } else {
                host_tbuf = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utime(p, host_tbuf));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_utimes
    case TARGET_NR_utimes:
        {
            struct timeval *tvp, tv[2];
            /* arg2 points at two target_timevals: {atime, mtime}. */
            if (arg2) {
                if (copy_from_user_timeval(&tv[0], arg2)
                    || copy_from_user_timeval(&tv[1],
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg1)))
                return -TARGET_EFAULT;
            ret = get_errno(utimes(p, tvp));
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_futimesat)
    case TARGET_NR_futimesat:
        {
            struct timeval *tvp, tv[2];
            /* Same layout as utimes, but relative to dirfd arg1. */
            if (arg3) {
                if (copy_from_user_timeval(&tv[0], arg3)
                    || copy_from_user_timeval(&tv[1],
                                              arg3 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
                tvp = tv;
            } else {
                tvp = NULL;
            }
            if (!(p = lock_user_string(arg2))) {
                return -TARGET_EFAULT;
            }
            /* path() applies QEMU's sysroot prefix remapping. */
            ret = get_errno(futimesat(arg1, path(p), tvp));
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_access
    case TARGET_NR_access:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(access(path(p), arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
    case TARGET_NR_faccessat:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /* The old faccessat syscall has no flags argument: pass 0. */
        ret = get_errno(faccessat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_faccessat2)
    case TARGET_NR_faccessat2:
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        /*
         * NOTE(review): forwards the guest's flags (arg4) to the host
         * faccessat() wrapper — assumes the host libc accepts a flags
         * argument there; confirm against the minimum supported libc.
         */
        ret = get_errno(faccessat(arg1, p, arg3, arg4));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_nice /* not on alpha */
    case TARGET_NR_nice:
        return get_errno(nice(arg1));
#endif
    case TARGET_NR_sync:
        /* sync() has no failure mode; always report success. */
        sync();
        return 0;
#if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
    case TARGET_NR_syncfs:
        return get_errno(syncfs(arg1));
#endif
    case TARGET_NR_kill:
        /* Signal numbers differ between targets; translate before sending. */
        return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
#ifdef TARGET_NR_rename
    case TARGET_NR_rename:
        {
            void *p2;
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(rename(p, p2));
            /* unlock_user() tolerates NULL, so both are released here. */
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat)
    case TARGET_NR_renameat:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(renameat(arg1, p, arg3, p2));
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_renameat2)
    case TARGET_NR_renameat2:
        {
            void *p2;
            p  = lock_user_string(arg2);
            p2 = lock_user_string(arg4);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else {
                /* arg5 carries the RENAME_* flags. */
                ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
            }
            unlock_user(p2, arg4, 0);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_mkdir
    case TARGET_NR_mkdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdir(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#if defined(TARGET_NR_mkdirat)
    case TARGET_NR_mkdirat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(mkdirat(arg1, p, arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_rmdir
    case TARGET_NR_rmdir:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(rmdir(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_dup:
        ret = get_errno(dup(arg1));
        if (ret >= 0) {
            /* Propagate any fd translator attached to the old fd. */
            fd_trans_dup(arg1, ret);
        }
        return ret;
#ifdef TARGET_NR_pipe
    case TARGET_NR_pipe:
        return do_pipe(cpu_env, arg1, 0, 0);
#endif
#ifdef TARGET_NR_pipe2
    case TARGET_NR_pipe2:
        /* pipe2's flags use fcntl encoding and need host translation. */
        return do_pipe(cpu_env, arg1,
                       target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
#endif
9804     case TARGET_NR_times:
9805         {
9806             struct target_tms *tmsp;
9807             struct tms tms;
9808             ret = get_errno(times(&tms));
9809             if (arg1) {
9810                 tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
9811                 if (!tmsp)
9812                     return -TARGET_EFAULT;
9813                 tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
9814                 tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
9815                 tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
9816                 tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
9817             }
9818             if (!is_error(ret))
9819                 ret = host_to_target_clock_t(ret);
9820         }
9821         return ret;
    case TARGET_NR_acct:
        /* acct(NULL) disables process accounting; otherwise enable it on
         * the given (sysroot-remapped) file. */
        if (arg1 == 0) {
            ret = get_errno(acct(NULL));
        } else {
            if (!(p = lock_user_string(arg1))) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(acct(path(p)));
            unlock_user(p, arg1, 0);
        }
        return ret;
#ifdef TARGET_NR_umount2
    case TARGET_NR_umount2:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        /* arg2 carries the MNT_* flags. */
        ret = get_errno(umount2(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_ioctl:
        return do_ioctl(arg1, arg2, arg3);
#ifdef TARGET_NR_fcntl
    case TARGET_NR_fcntl:
        return do_fcntl(arg1, arg2, arg3);
#endif
    case TARGET_NR_setpgid:
        return get_errno(setpgid(arg1, arg2));
    case TARGET_NR_umask:
        return get_errno(umask(arg1));
    case TARGET_NR_chroot:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chroot(p));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_dup2
    case TARGET_NR_dup2:
        ret = get_errno(dup2(arg1, arg2));
        if (ret >= 0) {
            /* Copy any fd translator from the source to the new fd. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
#endif
#if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
    case TARGET_NR_dup3:
    {
        int host_flags;

        /* O_CLOEXEC is the only flag dup3 accepts; reject anything else.
         * Use the target errno constant for consistency with the rest of
         * this switch (previously returned the host -EINVAL). */
        if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
            return -TARGET_EINVAL;
        }
        host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
        ret = get_errno(dup3(arg1, arg2, host_flags));
        if (ret >= 0) {
            /* Copy any fd translator from the source to the new fd. */
            fd_trans_dup(arg1, arg2);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_getppid /* not on alpha */
    case TARGET_NR_getppid:
        return get_errno(getppid());
#endif
#ifdef TARGET_NR_getpgrp
    case TARGET_NR_getpgrp:
        return get_errno(getpgrp());
#endif
    case TARGET_NR_setsid:
        return get_errno(setsid());
#ifdef TARGET_NR_sigaction
    case TARGET_NR_sigaction:
        {
#if defined(TARGET_MIPS)
            /*
             * MIPS old sigaction uses the full (4-word) sa_mask layout;
             * only the first word carries mask bits here.
             */
	    struct target_sigaction act, oact, *pact, *old_act;

	    if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
		act._sa_handler = old_act->_sa_handler;
		target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
		act.sa_flags = old_act->sa_flags;
		unlock_user_struct(old_act, arg2, 0);
		pact = &act;
	    } else {
		pact = NULL;
	    }

        ret = get_errno(do_sigaction(arg1, pact, &oact, 0));

	    if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
		old_act->_sa_handler = oact._sa_handler;
		old_act->sa_flags = oact.sa_flags;
		old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                /* Upper mask words are unused by the old ABI: zero them. */
		old_act->sa_mask.sig[1] = 0;
		old_act->sa_mask.sig[2] = 0;
		old_act->sa_mask.sig[3] = 0;
		unlock_user_struct(old_act, arg3, 1);
	    }
#else
            /* Generic old sigaction: single-word sa_mask. */
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* The kernel requires an exact sigset size match. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* act was only read; release without copy-back. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
#ifdef TARGET_NR_sgetmask /* not on alpha */
    case TARGET_NR_sgetmask:
        {
            /* Return the current blocked-signal mask as an old-style word. */
            sigset_t cur_set;
            abi_ulong target_set;
            ret = do_sigprocmask(0, NULL, &cur_set);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &cur_set);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_ssetmask /* not on alpha */
    case TARGET_NR_ssetmask:
        {
            /* Replace the blocked mask; return the previous old-style mask. */
            sigset_t set, oset;
            abi_ulong target_set = arg1;
            target_to_host_old_sigset(&set, &target_set);
            ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
            if (!ret) {
                host_to_target_old_sigset(&target_set, &oset);
                ret = target_set;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /*
             * Alpha's osf_sigprocmask takes the mask by value in arg2 and
             * returns the old mask in v0 rather than through a pointer.
             */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            sigset_t set, oldset, *set_ptr;
            int how;

            /* arg2 == NULL is a pure query: don't change the mask. */
            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            /* Optionally copy the previous mask back to the guest. */
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            /* The kernel requires an exact sigset size match. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            /* arg2 == NULL is a pure query: don't change the mask. */
            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                how = 0;
                set_ptr = NULL;
            }
            ret = do_sigprocmask(how, set_ptr, &oldset);
            /* Optionally copy the previous mask back to the guest. */
            if (!is_error(ret) && arg3) {
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Report pending signals using the old (single-word) sigset. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            TaskState *ts = cpu->opaque;
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            /* Restore/clean up the suspend mask state. */
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            sigset_t *set;

            /* arg2 is the guest's sigsetsize, validated by the helper. */
            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* The kernel requires an exact sigset size match. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            /* arg3 == NULL means wait indefinitely. */
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                /* Optionally copy the siginfo back to the guest. */
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* The return value is the delivered signal number. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait but with a 64-bit timespec. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            siginfo_t uinfo;

            /* Convert the guest siginfo and queue the translated signal. */
            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            siginfo_t uinfo;

            /* Thread-directed variant: arg1=tgid, arg2=tid, arg3=signal. */
            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Serialize with signal delivery before unwinding the frame. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;
            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success without applying them. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* Helper converts and copies the rusage to guest memory. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Either output pointer may be NULL. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
10405 #if defined(TARGET_NR_settimeofday)
10406     case TARGET_NR_settimeofday:
10407         {
10408             struct timeval tv, *ptv = NULL;
10409             struct timezone tz, *ptz = NULL;
10410 
10411             if (arg1) {
10412                 if (copy_from_user_timeval(&tv, arg1)) {
10413                     return -TARGET_EFAULT;
10414                 }
10415                 ptv = &tv;
10416             }
10417 
10418             if (arg2) {
10419                 if (copy_from_user_timezone(&tz, arg2)) {
10420                     return -TARGET_EFAULT;
10421                 }
10422                 ptz = &tz;
10423             }
10424 
10425             return get_errno(settimeofday(ptv, ptz));
10426         }
10427 #endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Legacy entry point: everything is reached through arg1. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        /* Final flag selects 64-bit time_t handling in do_pselect6(). */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;
            /* arg1 = link target string, arg2 = path of the new link */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;
            /* arg1 = target string, arg2 = dirfd, arg3 = new link path */
            p  = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_readlink
    case TARGET_NR_readlink:
        {
            void *p2;
            /* arg1 = guest path, arg2 = guest buffer, arg3 = buffer size */
            p = lock_user_string(arg1);
            p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p || !p2) {
                /*
                 * Match TARGET_NR_readlinkat: fail with EFAULT on a bad
                 * guest address rather than handing a NULL path or buffer
                 * to do_guest_readlink().
                 */
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_guest_readlink(p, p2, arg3));
            }
            unlock_user(p2, arg2, ret);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;
            /* arg1 = dirfd, arg2 = guest path, arg3 = buffer, arg4 = size */
            p  = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /* /proc/self/exe: report the emulated binary (exec_path),
                 * not the QEMU host binary. */
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        /* arg1 = path of swap file/device, arg2 = swap flags */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        /* Only LINUX_REBOOT_CMD_RESTART2 carries the extra string arg4. */
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
           /* arg4 must be ignored in all other cases */
           p = lock_user_string(arg4);
           if (!p) {
               return -TARGET_EFAULT;
           }
           ret = get_errno(reboot(arg1, arg2, arg3, p));
           unlock_user(p, arg4, 0);
        } else {
           ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
    defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
    || defined(TARGET_S390X)
        {
            /* These targets pass the six mmap arguments indirectly:
             * arg1 points at a guest array of six abi_ulongs. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            ret = get_errno(target_mmap(v1, v2, v3,
                                        target_to_host_bitmask(v4, mmap_flags_tbl),
                                        v5, v6));
        }
#else
        /* mmap pointers are always untagged */
        ret = get_errno(target_mmap(arg1, arg2, arg3,
                                    target_to_host_bitmask(arg4, mmap_flags_tbl),
                                    arg5,
                                    arg6));
#endif
        return ret;
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the file offset in units of (1 << MMAP_SHIFT) bytes. */
        ret = target_mmap(arg1, arg2, arg3,
                          target_to_host_bitmask(arg4, mmap_flags_tbl),
                          arg5, arg6 << MMAP_SHIFT);
        return get_errno(ret);
#endif
    case TARGET_NR_munmap:
        /* Memory-management addresses are always untagged. */
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = cpu->opaque;
            /* Special hack to detect libc making the stack executable.  */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Apply the change to the whole region from the stack limit
                 * up, and drop the GROWSDOWN flag before forwarding. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
        /* ??? msync/mlock/munlock are broken for softmmu.  */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* These calls operate on the host view (g2h) of the guest range. */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        /* arg1 = dirfd, arg2 = guest path, arg3 = mode; flags forced to 0 */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings.  */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority.  Signal no error.  */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers.  */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared conversion path: TARGET_NR_fstatfs jumps here after
         * filling 'stfs' via fstatfs(). */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        /* Shared with TARGET_NR_fstatfs64 below.  Note the 64-bit variants
         * take the output struct in arg3 (arg2 is the size argument). */
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Legacy multiplexed socket syscall (i386 and friends). */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* accept == accept4 with flags forced to 0 */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv == recvfrom with NULL address arguments */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        /* Final flag: 0 = receive, 1 = send */
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send == sendto with NULL address arguments */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        /* arg1 = guest buffer, arg2 = length, arg3 = flags.  On success
         * 'ret' is the number of bytes written, which bounds the unlock. */
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
#if defined(TARGET_NR_syslog)
    case TARGET_NR_syslog:
        {
            /*
             * syslog(type, bufp, len): arg1 = action, arg2 = guest buffer,
             * arg3 = buffer length.  The length checks below must use arg3;
             * the previous code initialised 'len' from arg2 (the buffer
             * pointer), so the EINVAL/zero-length short-cuts tested the
             * wrong value.
             */
            int len = arg3;

            switch (arg1) {
            case TARGET_SYSLOG_ACTION_CLOSE:         /* Close log */
            case TARGET_SYSLOG_ACTION_OPEN:          /* Open log */
            case TARGET_SYSLOG_ACTION_CLEAR:         /* Clear ring buffer */
            case TARGET_SYSLOG_ACTION_CONSOLE_OFF:   /* Disable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_ON:    /* Enable logging */
            case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
            case TARGET_SYSLOG_ACTION_SIZE_UNREAD:   /* Number of chars */
            case TARGET_SYSLOG_ACTION_SIZE_BUFFER:   /* Size of the buffer */
                /* These actions do not touch the buffer. */
                return get_errno(sys_syslog((int)arg1, NULL, len));
            case TARGET_SYSLOG_ACTION_READ:          /* Read from log */
            case TARGET_SYSLOG_ACTION_READ_CLEAR:    /* Read/clear msgs */
            case TARGET_SYSLOG_ACTION_READ_ALL:      /* Read last messages */
                {
                    if (len < 0) {
                        return -TARGET_EINVAL;
                    }
                    if (len == 0) {
                        return 0;
                    }
                    p = lock_user(VERIFY_WRITE, arg2, len, 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    ret = get_errno(sys_syslog((int)arg1, p, len));
                    unlock_user(p, arg2, len);
                }
                return ret;
            default:
                return -TARGET_EINVAL;
            }
        }
        break;
#endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            /* A guest itimerval is two consecutive target_timevals
             * (it_interval followed by it_value). */
            if (arg2) {
                pvalue = &value;
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            /* Copy the previous timer value back only if requested (arg3). */
            if (!is_error(ret) && arg3) {
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Common conversion: write the host 'st' out as the guest's
             * struct target_stat at arg2. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps only when both host and target
                 * structs carry them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: arg1 is the real number, remaining args shift
         * down by one position. */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only collect rusage if the guest asked for it. */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* Write status back only when a child was reaped (ret != 0)
                 * and the guest supplied a status pointer. */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            /* Run the host sysinfo(), then copy each field into the guest
             * struct (only when the guest passed a non-NULL pointer). */
            ret = get_errno(sysinfo(&value));
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Legacy multiplexed SysV IPC entry point. */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop == semtimedop with no timeout */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* Final flag selects the 64-bit time_t timespec layout. */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        return do_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return do_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run atexit-style guest teardown before exiting the whole group. */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;

            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release.  */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    /* vm86 only exists for 32-bit x86 guests */
    case TARGET_NR_vm86:
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;

            /* struct timex is both input and output: convert in, call,
             * convert back out on success. */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx, *phtx = &htx;

            if (target_to_host_timex(phtx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, phtx));
            /* NOTE(review): phtx always points at htx here, so the phtx
             * test below is redundant (kept byte-identical). */
            if (!is_error(ret) && phtx) {
                if (host_to_target_timex(arg2, phtx) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        {
            struct timex htx;

            /* Same as clock_adjtime, but with 64-bit time_t conversion. */
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    /* Simple pass-throughs: integer args need no translation. */
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        /*
         * 32-bit seek with a 64-bit offset split across arg2 (high) and
         * arg3 (low); the resulting position is stored to guest *arg4.
         */
        {
            int64_t res;
#if !defined(__NR_llseek)
            /* Host lacks _llseek: recombine the halves and use lseek(). */
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        /* Directory entry translation handled in do_getdents(). */
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        /* do_ppoll flags: (..., ppoll?, time64?) */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        /*
         * lock_iovec() validates and maps the guest iovec array; the
         * final unlock_iovec() flag (1) copies the read data back out.
         */
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                /* lock_iovec failed; errno carries the reason. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        /* Mirror of readv: data copied in on lock, not on unlock (0). */
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        /*
         * Positioned readv: the 64-bit offset arrives split across
         * arg4/arg5; target_to_host_low_high() reassembles it per ABI.
         */
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        /* Positioned writev; same offset-pair handling as preadv. */
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;

                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
           }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest-supplied size up to a whole host ulong. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);

            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));

            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }

                /* Copy the host mask out in guest byte/word layout. */
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;

            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round up to whole host ulongs, as for sched_getaffinity. */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);

            /* Convert the guest CPU mask into host layout. */
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }

            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        /*
         * Either output pointer may be NULL in the guest; only query and
         * write back the ones that were actually requested.
         */
        {
            unsigned cpu, node;
            ret = get_errno(sys_getcpu(arg1 ? &cpu : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpu, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        /* Copy in the guest sched_param (just sched_priority) and apply. */
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        /* Query the scheduling priority and copy it out byte-swapped. */
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;

            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        /* Like sched_setparam but also sets the policy (arg2). */
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        /*
         * Fetch the extended scheduling attributes into a host struct,
         * then swab each field individually into the guest buffer
         * (the struct layout is field-compatible but endian-sensitive).
         */
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Clamp the caller's size to what we know how to fill. */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* util_{min,max} exist only in the v1 (larger) struct. */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        /*
         * Mirror of sched_getattr: validate the guest-declared struct
         * size the same way the kernel does (E2BIG with the supported
         * size written back), then swab fields into a host sched_attr.
         */
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            /* size == 0 means the original (v0) struct without util fields. */
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                /* Too small to be any known version: report ours, E2BIG. */
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }

            /* Any bytes beyond what we understand must be zero. */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }

            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    /* Scheduler queries with no pointer arguments: direct pass-through. */
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        /* Round-robin quantum; timespec copied out in guest layout. */
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        /* Same, but with the 64-bit time_t guest timespec layout. */
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_nanosleep)
    case TARGET_NR_nanosleep:
        /*
         * Sleep for the guest-requested interval; on interruption, write
         * the remaining time back to *arg2 if the guest supplied it.
         * The conversion results were previously ignored: a bad request
         * pointer would sleep on uninitialized stack data instead of
         * failing with EFAULT, and a bad rem pointer went undetected.
         */
        {
            struct timespec req, rem;

            if (target_to_host_timespec(&req, arg1)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(safe_nanosleep(&req, &rem));
            if (is_error(ret) && arg2) {
                if (host_to_target_timespec(arg2, &rem)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
11574     case TARGET_NR_prctl:
11575         return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
11576         break;
11577 #ifdef TARGET_NR_arch_prctl
11578     case TARGET_NR_arch_prctl:
11579         return do_arch_prctl(cpu_env, arg1, arg2);
11580 #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /*
         * On ABIs that pass 64-bit values in aligned register pairs the
         * offset halves arrive one slot later; shift them down first.
         */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        /* Mirror of pread64; buffer is read-only so unlock copies nothing. */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        /* sys_getcwd1() fills the guest buffer; ret is the string length. */
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
    /*
     * Shared implementation for capget/capset: the header is always
     * read and written back (the kernel updates its version field even
     * on error); the data array is copied in for capset and out for
     * capget, and holds one or two elements depending on the version.
     */
    {
        struct target_user_cap_header *target_header;
        struct target_user_cap_data *target_data = NULL;
        struct __user_cap_header_struct header;
        struct __user_cap_data_struct data[2];
        struct __user_cap_data_struct *dataptr = NULL;
        int i, target_datalen;
        int data_items = 1;

        if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
            return -TARGET_EFAULT;
        }
        header.version = tswap32(target_header->version);
        header.pid = tswap32(target_header->pid);

        if (header.version != _LINUX_CAPABILITY_VERSION) {
            /* Version 2 and up takes pointer to two user_data structs */
            data_items = 2;
        }

        target_datalen = sizeof(*target_data) * data_items;

        if (arg2) {
            if (num == TARGET_NR_capget) {
                target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
            } else {
                target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
            }
            if (!target_data) {
                unlock_user_struct(target_header, arg1, 0);
                return -TARGET_EFAULT;
            }

            if (num == TARGET_NR_capset) {
                for (i = 0; i < data_items; i++) {
                    data[i].effective = tswap32(target_data[i].effective);
                    data[i].permitted = tswap32(target_data[i].permitted);
                    data[i].inheritable = tswap32(target_data[i].inheritable);
                }
            }

            dataptr = data;
        }

        if (num == TARGET_NR_capget) {
            ret = get_errno(capget(&header, dataptr));
        } else {
            ret = get_errno(capset(&header, dataptr));
        }

        /* The kernel always updates version for both capget and capset */
        target_header->version = tswap32(header.version);
        unlock_user_struct(target_header, arg1, 1);

        if (arg2) {
            if (num == TARGET_NR_capget) {
                for (i = 0; i < data_items; i++) {
                    target_data[i].effective = tswap32(data[i].effective);
                    target_data[i].permitted = tswap32(data[i].permitted);
                    target_data[i].inheritable = tswap32(data[i].inheritable);
                }
                unlock_user(target_data, arg2, target_datalen);
            } else {
                unlock_user(target_data, arg2, 0);
            }
        }
        return ret;
    }
    case TARGET_NR_sigaltstack:
        /* Alternate signal stack handling lives in do_sigaltstack(). */
        return do_sigaltstack(arg1, arg2, cpu_env);
11695 
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
    /*
     * Classic sendfile: the optional offset is a guest abi_long
     * (get/put_user_sal), updated in place after the transfer.
     */
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_sal(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_sal(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
    /* sendfile64: same flow, but the guest offset is an explicit s64. */
    {
        off_t *offp = NULL;
        off_t off;
        if (arg3) {
            ret = get_user_s64(off, arg3);
            if (is_error(ret)) {
                return ret;
            }
            offp = &off;
        }
        ret = get_errno(sendfile(arg1, arg2, offp, arg4));
        if (!is_error(ret) && arg3) {
            abi_long ret2 = put_user_s64(off, arg3);
            if (is_error(ret2)) {
                ret = ret2;
            }
        }
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Emulated via do_fork() with the vfork clone-flag combination. */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    /*
     * "Unsigned" getrlimit: translate the resource id, query the host,
     * and convert the limits (including RLIM_INFINITY) to guest layout.
     */
    {
	struct rlimit rlim;
	int resource = target_to_host_resource(arg1);
	ret = get_errno(getrlimit(resource, &rlim));
	if (!is_error(ret)) {
	    struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
	    target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
	    target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
	}
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* 64-bit length split across args; reassembled in the helper. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
	ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        /* path() applies the QEMU sysroot prefix remapping if configured. */
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        /* fstatat64/newfstatat share one implementation (dirfd-relative stat). */
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        /*
         * Try the host statx syscall first when available; if the host
         * kernel reports ENOSYS, fall back to fstatat() and synthesize
         * the statx fields the guest asked for from struct stat.
         */
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;

            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;

                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }

                /* Only ENOSYS falls through to the fstatat emulation. */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);

            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                /* Zero first: stat cannot provide every statx field. */
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* low2high*: widen legacy 16-bit guest ids to host uid_t/gid_t. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* high2low*: narrow host ids for targets with 16-bit legacy ids. */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 is a valid "query the count" call. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
    case TARGET_NR_setgroups:
        { /* the same code as for TARGET_NR_setgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 clears the supplementary group list. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * sizeof(target_id), 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
    case TARGET_NR_fchown:
        return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
#if defined(TARGET_NR_fchownat)
    case TARGET_NR_fchownat:
        /* dirfd-relative chown; flags (arg5) passed straight through. */
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
                                 low2highgid(arg4), arg5));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        /* Set real/effective/saved uid; low2highuid() widens a (possibly
         * 16-bit) target uid to the host uid_t. */
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy the three ids out to the guest pointers in arg1..arg3,
                 * narrowing to the target's id width. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid
    case TARGET_NR_setresgid:
        /*
         * Guard fixed: this case was wrapped in #ifdef TARGET_NR_getresgid,
         * which only worked because every target that defines getresgid also
         * defines setresgid.  Set real/effective/saved gid, widening
         * (possibly 16-bit) target gids to host gid_t.
         */
        return get_errno(sys_setresgid(low2highgid(arg1),
                                       low2highgid(arg2),
                                       low2highgid(arg3)));
#endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Store real/effective/saved gid at the three guest
                 * pointers, narrowing to the target's id width. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    case TARGET_NR_setfsuid:
        /* setfsuid/setfsgid always succeed; the return value is the
         * previous filesystem id. */
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));

#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        /* 32-bit id variant: ids are passed through without widening. */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif

#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific: the real uid is the normal return value and the
     * effective uid is returned in the a4 register. */
    case TARGET_NR_getxuid:
        {
            uid_t euid = geteuid();
            cpu_env->ir[IR_A4] = euid;
        }
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
    /* Alpha specific: real gid returned normally, effective gid in a4. */
    case TARGET_NR_getxgid:
        {
            /* Fixed: was declared uid_t although it holds a gid. */
            gid_t egid = getegid();
            cpu_env->ir[IR_A4] = egid;
        }
        return get_errno(getgid());
#endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /* Refresh the SWCR status bits from the live FPCR before
                 * reporting the software control word to the guest. */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                        return -TARGET_EFAULT;
                ret = 0;
            }
            break;

          /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
             case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
             case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
             case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
          */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
          case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Preserve only the rounding-mode field of the current FPCR,
                 * then merge in the bits derived from the new SWCR. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

          case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    /* Pick an si_code for the newly raised exceptions;
                     * later tests deliberately override earlier ones. */
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    /* Deliver SIGFPE to the guest at its current pc. */
                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

          /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
             case SSI_IEEE_STATE_AT_SIGNAL:
             case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
          */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            /* Translate the guest's "how" constant to the host value. */
            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            /* arg2 is the new mask by value (old-style sigset); the
             * previous mask is returned as the syscall result. */
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

/* 32-bit id syscall variants: ids fit as-is, no low2high conversion. */
#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            /* gidsetsize == 0 queries the number of supplementary groups;
             * grouplist stays NULL in that case. */
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Only 'ret' entries were filled in by the host. */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* Byte-swap each 32-bit gid into the host array. */
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                /* Read-only buffer: nothing to copy back, hence length 0. */
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(setgroups(gidsetsize, grouplist));
        }
#endif
/* More 32-bit id variants: ids are passed straight through. */
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Store the three ids at the guest pointers as u32. */
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                /* mincore reports an invalid address range as ENOMEM. */
                return -TARGET_ENOMEM;
            }
            /* NOTE(review): arg3 is the output residency vector, yet it is
             * locked with lock_user_string (length found by NUL scan) and
             * unlocked with length 'ret' (0 on success) — this only works
             * when lock_user maps guest memory directly; verify against the
             * debug bounce-buffer path. */
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise() returns the error number directly rather than
         * setting errno, hence the explicit conversion. */
        return -host_to_target_errno(ret);
#endif
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)

#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the arguments into the common fd, offset, len, advice
         * order; 'ret' is only borrowed as scratch here. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        /* posix_fadvise() returns the error number, not -1/errno. */
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different POSIX_FADV_* values from other targets. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        /* Fully delegated to the helper. */
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
    {
        /*
         * 32-bit fcntl with 64-bit file locks.  The F_*LK64 commands need
         * struct flock64 converted between guest and host layouts; all
         * other commands are forwarded to do_fcntl().
         */
        int cmd;
        struct flock64 fl;
        from_flock64_fn *copyfrom = copy_from_user_flock64;
        to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
        /* Old-ABI ARM lays out struct flock64 differently from EABI. */
        if (!cpu_env->eabi) {
            copyfrom = copy_from_user_oabi_flock64;
            copyto = copy_to_user_oabi_flock64;
        }
#endif

        cmd = target_to_host_fcntl_cmd(arg2);
        if (cmd == -TARGET_EINVAL) {
            return cmd;
        }

        switch(arg2) {
        case TARGET_F_GETLK64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            if (ret == 0) {
                /* On success the (possibly updated) lock is copied back. */
                ret = copyto(arg3, &fl);
            }
            break;

        case TARGET_F_SETLK64:
        case TARGET_F_SETLKW64:
            ret = copyfrom(&fl, arg3);
            if (ret) {
                break;
            }
            ret = get_errno(safe_fcntl(arg1, cmd, &fl));
            break;
        default:
            ret = do_fcntl(arg1, arg2, arg3);
            break;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        /* Report the guest's page size, not the host's. */
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* On 32-bit ABIs the 64-bit offset arrives in a register pair;
         * some targets require the pair to start on an even register,
         * shifting the remaining arguments up by one. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *p, *b = 0;
        /* arg2 == 0 is the "query required buffer size" form. */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            /* p = path, n = attribute name, v = value buffer (may be NULL
             * when arg4 == 0 sets an empty value). */
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *p, *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy the retrieved value back to the guest buffer. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *p, *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
      /* MIPS keeps the TLS pointer in the CP0 UserLocal register. */
      cpu_env->active_tc.CP0_UserLocal = arg1;
      return 0;
#elif defined(TARGET_CRIS)
      /* CRIS requires the low 8 bits of the TLS value to be zero. */
      if (arg1 & 0xff)
          ret = -TARGET_EINVAL;
      else {
          cpu_env->pregs[PR_PID] = arg1;
          ret = 0;
      }
      return ret;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
      return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
      {
          /* m68k stores the TLS pointer in the per-thread TaskState. */
          TaskState *ts = cpu->opaque;
          ts->tp_value = arg1;
          return 0;
      }
#else
      return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = cpu->opaque;
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        /* Not emulated. */
        return -TARGET_ENOSYS;
#endif

#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        /* Convert the guest timespec (32-bit time_t layout) first; the
         * conversion itself can fault. */
        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* Same as above but for the 64-bit time_t guest layout. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            /* Copy-out failure (EFAULT) becomes the syscall result. */
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        /*
         * Fixed: the copy-out result was previously ignored, so a bad
         * guest pointer silently returned success.  A NULL res pointer
         * remains valid (the kernel permits clock_getres(clk, NULL)),
         * matching the TARGET_NR_clock_gettime handling otherwise.
         */
        if (!is_error(ret) && arg2) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        /* Same EFAULT propagation fix as TARGET_NR_clock_getres. */
        if (!is_error(ret) && arg2) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the "remaining time" output buffer when the
         * guest rem pointer (arg4) is non-NULL. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
              return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        /* Same as TARGET_NR_clock_nanosleep but with the 64-bit time_t
         * timespec layout in guest memory. */
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        /* Record the guest's clear_child_tid pointer in the per-thread
         * TaskState; QEMU handles the clear-on-exit itself. */
        TaskState *ts = cpu->opaque;
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif

    case TARGET_NR_tkill:
        /* Signal numbers differ between guest and host ABIs, hence the
         * translation; the tid is passed through unchanged. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));

    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));

#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif

#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            /* A zero times pointer (arg3) means "set both timestamps to
             * the current time", expressed as NULL to the host call;
             * otherwise convert the two-element guest timespec array. */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* A zero pathname (arg2) is the "operate on the dirfd
             * itself" form and is passed through as NULL. */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            /* Same as TARGET_NR_utimensat but the guest array uses
             * struct target__kernel_timespec (64-bit time_t). */
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_futex
    case TARGET_NR_futex:
        /* The bool selects 32- vs 64-bit time_t handling for the
         * optional timeout inside do_futex(). */
        return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_futex_time64
    case TARGET_NR_futex_time64:
        return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef CONFIG_INOTIFY
#if defined(TARGET_NR_inotify_init)
    case TARGET_NR_inotify_init:
        ret = get_errno(inotify_init());
        if (ret >= 0) {
            /* Register a translator so inotify events read from this
             * fd are converted to the guest's event layout. */
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
    case TARGET_NR_inotify_init1:
        /* Flags (O_NONBLOCK/O_CLOEXEC style bits) differ per target;
         * remap them through the shared fcntl flag table. */
        ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
                                          fcntl_flags_tbl)));
        if (ret >= 0) {
            fd_trans_register(ret, &target_inotify_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_inotify_add_watch)
    case TARGET_NR_inotify_add_watch:
        /* Translate the guest pathname.  Fail with EFAULT if the
         * string is unreadable, instead of passing NULL on to
         * path()/inotify_add_watch() as the old code did; this matches
         * the sibling pathname paths (utimensat, mq_open, ...). */
        p = lock_user_string(arg2);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
        unlock_user(p, arg2, 0);
        return ret;
#endif
#if defined(TARGET_NR_inotify_rm_watch)
    case TARGET_NR_inotify_rm_watch:
        /* fd and watch descriptor need no translation. */
        return get_errno(inotify_rm_watch(arg1, arg2));
#endif
#endif

#if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
    case TARGET_NR_mq_open:
        {
            struct mq_attr posix_mq_attr;
            struct mq_attr *pposix_mq_attr;
            int host_flags;

            /* Open flags use the target's layout; remap to host bits. */
            host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
            pposix_mq_attr = NULL;
            if (arg4) {
                if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
                    return -TARGET_EFAULT;
                }
                pposix_mq_attr = &posix_mq_attr;
            }
            /* NOTE(review): the queue name is locked at arg1 - 1, one
             * byte before the guest pointer.  This looks like an
             * off-by-one but is long-standing; confirm against the
             * target ABI / git history before changing. */
            p = lock_user_string(arg1 - 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
            unlock_user (p, arg1, 0);
        }
        return ret;

    case TARGET_NR_mq_unlink:
        /* NOTE(review): same arg1 - 1 offset as TARGET_NR_mq_open
         * above -- verify. */
        p = lock_user_string(arg1 - 1);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(mq_unlink(p));
        unlock_user (p, arg1, 0);
        return ret;

#ifdef TARGET_NR_mq_timedsend
    case TARGET_NR_mq_timedsend:
        {
            struct timespec ts;

            /* NOTE(review): the lock_user() result is not checked for
             * NULL; an unreadable message buffer reaches the host call
             * as a NULL pointer -- verify intended. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* No timeout supplied: block indefinitely. */
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user (p, arg2, arg3);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedsend_time64
    case TARGET_NR_mq_timedsend_time64:
        {
            /* Same as TARGET_NR_mq_timedsend but with the 64-bit
             * time_t timespec layout. */
            struct timespec ts;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
            }
            unlock_user(p, arg2, arg3);
        }
        return ret;
#endif

#ifdef TARGET_NR_mq_timedreceive
    case TARGET_NR_mq_timedreceive:
        {
            struct timespec ts;
            unsigned int prio;

            /* NOTE(review): lock_user() result unchecked, and arg2 is
             * a buffer the host call writes into although it is locked
             * with VERIFY_READ -- verify intended. */
            p = lock_user (VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user (p, arg2, arg3);
            /* NOTE(review): put_user_u32() failure is ignored here. */
            if (arg4 != 0)
                put_user_u32(prio, arg4);
        }
        return ret;
#endif
#ifdef TARGET_NR_mq_timedreceive_time64
    case TARGET_NR_mq_timedreceive_time64:
        {
            /* Same as TARGET_NR_mq_timedreceive but with the 64-bit
             * time_t timespec layout. */
            struct timespec ts;
            unsigned int prio;

            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (arg5 != 0) {
                if (target_to_host_timespec64(&ts, arg5)) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, &ts));
                if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
                                                     &prio, NULL));
            }
            unlock_user(p, arg2, arg3);
            if (arg4 != 0) {
                put_user_u32(prio, arg4);
            }
        }
        return ret;
#endif

    /* Not implemented for now... */
/*     case TARGET_NR_mq_notify: */
/*         break; */

    case TARGET_NR_mq_getsetattr:
        {
            /* arg2 != 0: set new attributes (old ones come back in
             * posix_mq_attr_out); otherwise just query.  Either way the
             * old/current attributes are copied out if arg3 != 0. */
            struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
            ret = 0;
            if (arg2 != 0) {
                /* NOTE(review): copy_from_user_mq_attr() failure is
                 * ignored here, unlike in TARGET_NR_mq_open -- verify. */
                copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
                ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
                                           &posix_mq_attr_out));
            } else if (arg3 != 0) {
                ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
            }
            if (ret == 0 && arg3 != 0) {
                copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
            }
        }
        return ret;
#endif

#ifdef CONFIG_SPLICE
#ifdef TARGET_NR_tee
    case TARGET_NR_tee:
        {
            /* Pure fd-to-fd duplication; no pointer args to translate. */
            ret = get_errno(tee(arg1,arg2,arg3,arg4));
        }
        return ret;
#endif
#ifdef TARGET_NR_splice
    case TARGET_NR_splice:
        {
            loff_t loff_in, loff_out;
            loff_t *ploff_in = NULL, *ploff_out = NULL;
            /* Copy in the optional 64-bit in/out offsets (NULL guest
             * pointer means "use the fd's file offset"). */
            if (arg2) {
                if (get_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
                ploff_in = &loff_in;
            }
            if (arg4) {
                if (get_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
                ploff_out = &loff_out;
            }
            ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
            /* Write back the offsets the host call advanced. */
            if (arg2) {
                if (put_user_u64(loff_in, arg2)) {
                    return -TARGET_EFAULT;
                }
            }
            if (arg4) {
                if (put_user_u64(loff_out, arg4)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_vmsplice
	case TARGET_NR_vmsplice:
        {
            /* Translate the guest iovec array before handing it to the
             * host; the 1 requests copying of the iov contents. */
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                /* lock_iovec() reports its failure cause via errno. */
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#endif /* CONFIG_SPLICE */
#ifdef CONFIG_EVENTFD
#if defined(TARGET_NR_eventfd)
    case TARGET_NR_eventfd:
        ret = get_errno(eventfd(arg1, 0));
        if (ret >= 0) {
            /* Register a translator so the 8-byte counter read/written
             * through this fd is byte-swapped for the guest. */
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
#endif
#if defined(TARGET_NR_eventfd2)
    case TARGET_NR_eventfd2:
    {
        /* Only EFD_NONBLOCK/EFD_CLOEXEC-style bits are remapped; any
         * other flag bits are passed through unchanged. */
        int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
        if (arg2 & TARGET_O_NONBLOCK) {
            host_flags |= O_NONBLOCK;
        }
        if (arg2 & TARGET_O_CLOEXEC) {
            host_flags |= O_CLOEXEC;
        }
        ret = get_errno(eventfd(arg1, host_flags));
        if (ret >= 0) {
            fd_trans_register(ret, &target_eventfd_trans);
        }
        return ret;
    }
#endif
#endif /* CONFIG_EVENTFD  */
#if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
    case TARGET_NR_fallocate:
        /* On 32-bit ABIs (except MIPS N32) the two 64-bit values arrive
         * split across register pairs and must be reassembled. */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
                                  target_offset64(arg5, arg6)));
#else
        ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(CONFIG_SYNC_FILE_RANGE)
#if defined(TARGET_NR_sync_file_range)
    case TARGET_NR_sync_file_range:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#if defined(TARGET_MIPS)
        /* On 32-bit MIPS the offset pair starts at arg3 (arg2 appears
         * to be ABI alignment padding -- NOTE(review): verify against
         * the MIPS O32 syscall convention). */
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg7));
#else
        ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
                                        target_offset64(arg4, arg5), arg6));
#endif /* !TARGET_MIPS */
#else
        ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
#endif
        return ret;
#endif
#if defined(TARGET_NR_sync_file_range2) || \
    defined(TARGET_NR_arm_sync_file_range)
#if defined(TARGET_NR_sync_file_range2)
    case TARGET_NR_sync_file_range2:
#endif
#if defined(TARGET_NR_arm_sync_file_range)
    case TARGET_NR_arm_sync_file_range:
#endif
        /* This is like sync_file_range but the arguments are reordered */
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
                                        target_offset64(arg5, arg6), arg2));
#else
        ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
#endif
        return ret;
#endif
#endif
#if defined(TARGET_NR_signalfd4)
    case TARGET_NR_signalfd4:
        /* arg3 is the sigset size; do_signalfd4() validates it and the
         * mask, so only fd, mask pointer and flags are forwarded. */
        return do_signalfd4(arg1, arg2, arg4);
#endif
#if defined(TARGET_NR_signalfd)
    case TARGET_NR_signalfd:
        /* Legacy variant: same helper with no flags. */
        return do_signalfd4(arg1, arg2, 0);
#endif
#if defined(CONFIG_EPOLL)
#if defined(TARGET_NR_epoll_create)
    case TARGET_NR_epoll_create:
        /* The size argument is a historical hint; pass it through. */
        return get_errno(epoll_create(arg1));
#endif
#if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
    case TARGET_NR_epoll_create1:
        return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
#endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* EPOLL_CTL_DEL ignores the event, so only convert it for
             * ADD/MOD operations. */
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif

#if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
#if defined(TARGET_NR_epoll_wait)
    case TARGET_NR_epoll_wait:
#endif
#if defined(TARGET_NR_epoll_pwait)
    case TARGET_NR_epoll_pwait:
#endif
    {
        /* Shared implementation: both syscalls are funneled through
         * safe_epoll_pwait(), with epoll_wait passing a NULL sigmask.
         * Events are collected in a host-side array and converted to
         * the guest layout on the way out. */
        struct target_epoll_event *target_ep;
        struct epoll_event *ep;
        int epfd = arg1;
        int maxevents = arg3;
        int timeout = arg4;

        if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
            return -TARGET_EINVAL;
        }

        target_ep = lock_user(VERIFY_WRITE, arg2,
                              maxevents * sizeof(struct target_epoll_event), 1);
        if (!target_ep) {
            return -TARGET_EFAULT;
        }

        ep = g_try_new(struct epoll_event, maxevents);
        if (!ep) {
            unlock_user(target_ep, arg2, 0);
            return -TARGET_ENOMEM;
        }

        /* num is the syscall number being dispatched; distinguish the
         * two entry points here. */
        switch (num) {
#if defined(TARGET_NR_epoll_pwait)
        case TARGET_NR_epoll_pwait:
        {
            sigset_t *set = NULL;

            if (arg5) {
                ret = process_sigsuspend_mask(&set, arg5, arg6);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             set, SIGSET_T_SIZE));

            if (set) {
                finish_sigsuspend_mask(ret);
            }
            break;
        }
#endif
#if defined(TARGET_NR_epoll_wait)
        case TARGET_NR_epoll_wait:
            ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
                                             NULL, 0));
            break;
#endif
        default:
            ret = -TARGET_ENOSYS;
        }
        if (!is_error(ret)) {
            /* ret is the number of ready events; byte-swap each one
             * into the guest buffer. */
            int i;
            for (i = 0; i < ret; i++) {
                target_ep[i].events = tswap32(ep[i].events);
                target_ep[i].data.u64 = tswap64(ep[i].data.u64);
            }
            unlock_user(target_ep, arg2,
                        ret * sizeof(struct target_epoll_event));
        } else {
            unlock_user(target_ep, arg2, 0);
        }
        g_free(ep);
        return ret;
    }
#endif
#endif
#ifdef TARGET_NR_prlimit64
    case TARGET_NR_prlimit64:
    {
        /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
        struct target_rlimit64 *target_rnew, *target_rold;
        struct host_rlimit64 rnew, rold, *rnewp = 0;
        int resource = target_to_host_resource(arg2);

        /* New limits for AS/DATA/STACK are deliberately not forwarded
         * to the host (rnewp stays NULL for them) -- NOTE(review):
         * presumably because they would constrain QEMU's own memory
         * use; confirm rationale. */
        if (arg3 && (resource != RLIMIT_AS &&
                     resource != RLIMIT_DATA &&
                     resource != RLIMIT_STACK)) {
            if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
            __get_user(rnew.rlim_max, &target_rnew->rlim_max);
            unlock_user_struct(target_rnew, arg3, 0);
            rnewp = &rnew;
        }

        ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
        if (!is_error(ret) && arg4) {
            if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
                return -TARGET_EFAULT;
            }
            __put_user(rold.rlim_cur, &target_rold->rlim_cur);
            __put_user(rold.rlim_max, &target_rold->rlim_max);
            unlock_user_struct(target_rold, arg4, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_gethostname
    case TARGET_NR_gethostname:
    {
        /* Let the host write the name directly into the locked guest
         * buffer of arg2 bytes. */
        char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (name) {
            ret = get_errno(gethostname(name, arg2));
            unlock_user(name, arg1, arg2);
        } else {
            ret = -TARGET_EFAULT;
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_atomic_cmpxchg_32
    case TARGET_NR_atomic_cmpxchg_32:
    {
        /* Guest-assisted 32-bit compare-and-swap: if *arg6 == arg2,
         * store arg1 there; the old memory value is returned either way.
         * should use start_exclusive from main.c */
        abi_ulong mem_value;
        if (get_user_u32(mem_value, arg6)) {
            /* Unreadable address: deliver SIGSEGV to the guest and bail
             * out with the sentinel the original code computed.  The
             * old code fell through and compared/returned the
             * uninitialized mem_value (undefined behaviour). */
            target_siginfo_t info;
            info.si_signo = SIGSEGV;
            info.si_errno = 0;
            info.si_code = TARGET_SEGV_MAPERR;
            info._sifields._sigfault._addr = arg6;
            queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
            return 0xdeadbeef;
        }
        if (mem_value == arg2) {
            put_user_u32(arg1, arg6);
        }
        return mem_value;
    }
#endif
#ifdef TARGET_NR_atomic_barrier
    case TARGET_NR_atomic_barrier:
        /* Like the kernel implementation and the
           qemu arm barrier, no-op this? */
        return 0;
#endif

#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */

        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;

        int clkid = arg1;
        /* Grab a slot in the g_posix_timers table; the guest-visible
         * timer id encodes this index (with TIMER_MAGIC). */
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers  + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    /* Roll back the slot reservation on any failure. */
                    free_host_timer_slot(timer_index);
                    return ret;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                free_host_timer_slot(timer_index);
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    /* Couldn't report the id to the guest: undo the
                     * host timer as well as the slot. */
                    timer_delete(*phtimer);
                    free_host_timer_slot(timer_index);
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            /* get_timer_id() already returns a target errno. */
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* Copy the previous setting out only if requested. */
            if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_settime64
    case TARGET_NR_timer_settime64:
    {
        /* Same as TARGET_NR_timer_settime with 64-bit time_t layout. */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            if (target_to_host_itimerspec64(&hspec_new, arg3)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(
                          timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            /* Conversion failure overrides any host result. */
            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_gettime64
    case TARGET_NR_timer_gettime64:
    {
        /* args: timer_t timerid, struct itimerspec64 *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec64(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        return ret;
    }
#endif

#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            /* Release the table slot so the id can be reused. */
            free_host_timer_slot(timerid);
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        if (ret >= 0) {
            /* Register a translator so the 8-byte expiry counter read
             * from this fd is byte-swapped for the guest. */
            fd_trans_register(ret, &target_timerfd_trans);
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            /* Copy out only for a non-NULL guest pointer. */
            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime64:
        {
            /* 64-bit time_t variant of TARGET_NR_timerfd_gettime. */
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            /* NULL new_value is passed through; the host call reports
             * the resulting EINVAL/EFAULT itself. */
            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif

#if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime64:
        {
            /* 64-bit time_t variant of TARGET_NR_timerfd_settime. */
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec64(&its_new, arg3)) {
                    return -TARGET_EFAULT;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
13715 
13716 #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
13717     case TARGET_NR_ioprio_get:
13718         return get_errno(ioprio_get(arg1, arg2));
13719 #endif
13720 
13721 #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
13722     case TARGET_NR_ioprio_set:
13723         return get_errno(ioprio_set(arg1, arg2, arg3));
13724 #endif
13725 
13726 #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
13727     case TARGET_NR_setns:
13728         return get_errno(setns(arg1, arg2));
13729 #endif
13730 #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
13731     case TARGET_NR_unshare:
13732         return get_errno(unshare(arg1));
13733 #endif
13734 #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
13735     case TARGET_NR_kcmp:
13736         return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
13737 #endif
13738 #ifdef TARGET_NR_swapcontext
13739     case TARGET_NR_swapcontext:
13740         /* PowerPC specific.  */
13741         return do_swapcontext(cpu_env, arg1, arg2, arg3);
13742 #endif
13743 #ifdef TARGET_NR_memfd_create
13744     case TARGET_NR_memfd_create:
13745         p = lock_user_string(arg1);
13746         if (!p) {
13747             return -TARGET_EFAULT;
13748         }
13749         ret = get_errno(memfd_create(p, arg2));
13750         fd_trans_unregister(ret);
13751         unlock_user(p, arg1, 0);
13752         return ret;
13753 #endif
13754 #if defined TARGET_NR_membarrier && defined __NR_membarrier
13755     case TARGET_NR_membarrier:
13756         return get_errno(membarrier(arg1, arg2));
13757 #endif
13758 
13759 #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
13760     case TARGET_NR_copy_file_range:
13761         {
13762             loff_t inoff, outoff;
13763             loff_t *pinoff = NULL, *poutoff = NULL;
13764 
13765             if (arg2) {
13766                 if (get_user_u64(inoff, arg2)) {
13767                     return -TARGET_EFAULT;
13768                 }
13769                 pinoff = &inoff;
13770             }
13771             if (arg4) {
13772                 if (get_user_u64(outoff, arg4)) {
13773                     return -TARGET_EFAULT;
13774                 }
13775                 poutoff = &outoff;
13776             }
13777             /* Do not sign-extend the count parameter. */
13778             ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
13779                                                  (abi_ulong)arg5, arg6));
13780             if (!is_error(ret) && ret > 0) {
13781                 if (arg2) {
13782                     if (put_user_u64(inoff, arg2)) {
13783                         return -TARGET_EFAULT;
13784                     }
13785                 }
13786                 if (arg4) {
13787                     if (put_user_u64(outoff, arg4)) {
13788                         return -TARGET_EFAULT;
13789                     }
13790                 }
13791             }
13792         }
13793         return ret;
13794 #endif
13795 
13796 #if defined(TARGET_NR_pivot_root)
13797     case TARGET_NR_pivot_root:
13798         {
13799             void *p2;
13800             p = lock_user_string(arg1); /* new_root */
13801             p2 = lock_user_string(arg2); /* put_old */
13802             if (!p || !p2) {
13803                 ret = -TARGET_EFAULT;
13804             } else {
13805                 ret = get_errno(pivot_root(p, p2));
13806             }
13807             unlock_user(p2, arg2, 0);
13808             unlock_user(p, arg1, 0);
13809         }
13810         return ret;
13811 #endif
13812 
13813 #if defined(TARGET_NR_riscv_hwprobe)
13814     case TARGET_NR_riscv_hwprobe:
13815         return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
13816 #endif
13817 
13818     default:
13819         qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
13820         return -TARGET_ENOSYS;
13821     }
13822     return ret;
13823 }
13824 
13825 abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
13826                     abi_long arg2, abi_long arg3, abi_long arg4,
13827                     abi_long arg5, abi_long arg6, abi_long arg7,
13828                     abi_long arg8)
13829 {
13830     CPUState *cpu = env_cpu(cpu_env);
13831     abi_long ret;
13832 
13833 #ifdef DEBUG_ERESTARTSYS
13834     /* Debug-only code for exercising the syscall-restart code paths
13835      * in the per-architecture cpu main loops: restart every syscall
13836      * the guest makes once before letting it through.
13837      */
13838     {
13839         static bool flag;
13840         flag = !flag;
13841         if (flag) {
13842             return -QEMU_ERESTARTSYS;
13843         }
13844     }
13845 #endif
13846 
13847     record_syscall_start(cpu, num, arg1,
13848                          arg2, arg3, arg4, arg5, arg6, arg7, arg8);
13849 
13850     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13851         print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
13852     }
13853 
13854     ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
13855                       arg5, arg6, arg7, arg8);
13856 
13857     if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
13858         print_syscall_ret(cpu_env, num, ret, arg1, arg2,
13859                           arg3, arg4, arg5, arg6);
13860     }
13861 
13862     record_syscall_return(cpu, num, ret);
13863     return ret;
13864 }
13865